/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2016 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"

#include "frame.h"
#include "inferior.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "doublest.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2-frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "language.h"
#include "infcall.h"
#include "ax.h"
#include "ax-gdb.h"

#include "aarch64-tdep.h"

#include "elf-bfd.h"
#include "elf/aarch64.h"

#include "vec.h"

#include "record.h"
#include "record-full.h"

#include "features/aarch64.c"

#include "arch/aarch64-insn.h"

#include "opcode/aarch64.h"

#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
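
/* Illustrative sketch added for exposition (not part of the original
   source): decoding the fields of "add x1, x1, #1" (0x91000421) with
   the extraction macros above.  The helper name is hypothetical and
   the block is compiled out.  */
#if 0
static void
example_decode_add_imm (void)
{
  uint32_t insn = 0x91000421;		/* add x1, x1, #1 */
  unsigned rd = bits (insn, 0, 4);	/* => 1 (destination Rd) */
  unsigned rn = bits (insn, 5, 9);	/* => 1 (source Rn) */
  unsigned imm12 = bits (insn, 10, 21);	/* => 1 (12-bit immediate) */
  unsigned sf = bit (insn, 31);		/* => 1 (64-bit operation) */
}
#endif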

/* Pseudo register base numbers.  */
#define AARCH64_Q0_REGNUM 0
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + 32)
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
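
/* Editorial note (not part of the original source): these bases are
   relative pseudo register numbers; the accessors below first subtract
   gdbarch_num_regs (gdbarch), so the absolute register number of
   e.g. "d5" is gdbarch_num_regs (gdbarch) + AARCH64_D0_REGNUM + 5.  */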

/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  pv_t regs[AARCH64_X_REGISTER_COUNT];
  struct pv_area *stack;
  struct cleanup *back_to;

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  stack = make_pv_area (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
  back_to = make_cleanup_free_pv_area (stack);

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = read_memory_unsigned_integer (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1) != 0)
        break;

      if (inst.opcode->iclass == addsub_imm
          && (inst.opcode->op == OP_ADD
              || strcmp ("sub", inst.opcode->name) == 0))
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

          if (inst.opcode->op == OP_ADD)
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          inst.operands[2].imm.value);
            }
          else
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          -inst.operands[2].imm.value);
            }
        }
      else if (inst.opcode->iclass == pcreladdr
               && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
        {
          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == branch_imm)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == condbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == branch_reg)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == compbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->op == OP_MOVZ)
        {
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == log_shift
               && strcmp (inst.opcode->name, "orr") == 0)
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;
          unsigned rm = inst.operands[2].reg.regno;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

          if (inst.operands[2].shifter.amount == 0
              && rn == AARCH64_SP_REGNUM)
            regs[rd] = regs[rm];
          else
            {
              if (aarch64_debug)
                {
                  debug_printf ("aarch64: prologue analysis gave up "
                                "addr=0x%s opcode=0x%x (orr x register)\n",
                                core_addr_to_string_nz (start), insn);
                }
              break;
            }
        }
      else if (inst.opcode->op == OP_STUR)
        {
          unsigned rt = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].addr.base_regno;
          int is64
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
          gdb_assert (!inst.operands[1].addr.offset.is_reg);

          pv_area_store (stack,
                         pv_add_constant (regs[rn],
                                          inst.operands[1].addr.offset.imm),
                         is64 ? 8 : 4, regs[rt]);
        }
      else if ((inst.opcode->iclass == ldstpair_off
                || inst.opcode->iclass == ldstpair_indexed)
               && inst.operands[2].addr.preind
               && strcmp ("stp", inst.opcode->name) == 0)
        {
          unsigned rt1 = inst.operands[0].reg.regno;
          unsigned rt2 = inst.operands[1].reg.regno;
          unsigned rn = inst.operands[2].addr.base_regno;
          int32_t imm = inst.operands[2].addr.offset.imm;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
          gdb_assert (!inst.operands[2].addr.offset.is_reg);

          /* If recording this store would invalidate the store area
             (perhaps because rn is not known) then we should abandon
             further prologue analysis.  */
          if (pv_area_store_would_trash (stack,
                                         pv_add_constant (regs[rn], imm)))
            break;

          if (pv_area_store_would_trash (stack,
                                         pv_add_constant (regs[rn], imm + 8)))
            break;

          pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
                         regs[rt1]);
          pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
                         regs[rt2]);

          if (inst.operands[2].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);
        }
      else if (inst.opcode->iclass == testbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else
        {
          if (aarch64_debug)
            {
              debug_printf ("aarch64: prologue analysis gave up addr=0x%s"
                            " opcode=0x%x\n",
                            core_addr_to_string_nz (start), insn);
            }
          break;
        }
    }

  if (cache == NULL)
    {
      do_cleanups (back_to);
      return start;
    }

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (pv_area_find_reg (stack, gdbarch, i, &offset))
        cache->saved_regs[i].addr = offset;
    }

  do_cleanups (back_to);
  return start;
}
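
/* Illustrative example (not part of the original source): a typical
   GCC-generated prologue that the analyzer above fully recognizes:

       sub  sp, sp, #48          // addsub_imm: SP modelled as SP0 - 48
       stp  x29, x30, [sp, #32]  // ldstpair_off: FP/LR recorded in the
                                 // pv_area at SP0 - 16 and SP0 - 8
       add  x29, sp, #32         // addsub_imm: x29 = SP0 - 16

   where SP0 is the stack pointer at function entry.  Afterwards
   regs[AARCH64_FP_REGNUM] equals SP0 - 16, so the cache selects the
   frame pointer as frame register with framesize 16, and
   pv_area_find_reg locates the saved x29/x30 slots.  */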

/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  unsigned long inst;
  CORE_ADDR skip_pc;
  CORE_ADDR func_addr, limit_pc;
  struct symtab_and_line sal;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
        = skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
        return max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128;	/* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
                       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
                                &prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
        {
          /* No line info so use the current PC.  */
          prologue_end = prev_pc;
        }
      else if (sal.end < prologue_end)
        {
          /* The next line begins after the function end.  */
          prologue_end = sal.end;
        }

      prologue_end = min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;
      LONGEST saved_fp;
      LONGEST saved_lr;
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
        return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;
      cache->saved_regs[30].addr = 8;
    }
}

/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory
   are not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
                               struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
                                           void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
                          void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
                                void **this_cache, int prev_regnum)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
         +----------+  ^
         | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          | <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
         |          |
         |          |<- SP
         +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
                                      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
                                       prev_regnum);
}

/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
                                                    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
                                       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
                      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
         to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Assuming THIS_FRAME is a dummy, return the frame ID of that
   dummy frame.  The frame ID's base needs to match the TOS value
   saved by save_dummy_frame_tos () and returned from
   aarch64_push_dummy_call, and the PC needs to match the dummy
   frame's breakpoint.  */

static struct frame_id
aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_id_build (get_frame_register_unsigned (this_frame,
                                                      AARCH64_SP_REGNUM),
                         get_frame_pc (this_frame));
}

/* Implement the "unwind_pc" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  CORE_ADDR pc
    = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);

  return pc;
}

/* Implement the "unwind_sp" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
}

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
                              void **this_cache, int regnum)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
                      _("Unexpected register %d"), regnum);
    }
}

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
                               struct dwarf2_frame_state_reg *reg,
                               struct frame_info *this_frame)
{
  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      break;
    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      break;
    }
}

/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);

/* Return the alignment (in bytes) of the given type.  */

static int
aarch64_type_align (struct type *t)
{
  int n;
  int align;
  int falign;

  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    default:
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));
      return 4;

    case TYPE_CODE_PTR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_INT:
    case TYPE_CODE_FLT:
    case TYPE_CODE_SET:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:
    case TYPE_CODE_REF:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
      return TYPE_LENGTH (t);

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (t))
        {
          /* Use the natural alignment for vector types (the same for
             scalar type), but the maximum alignment is 128-bit.  */
          if (TYPE_LENGTH (t) > 16)
            return 16;
          else
            return TYPE_LENGTH (t);
        }
      else
        return aarch64_type_align (TYPE_TARGET_TYPE (t));

    case TYPE_CODE_COMPLEX:
      return aarch64_type_align (TYPE_TARGET_TYPE (t));

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      align = 1;
      for (n = 0; n < TYPE_NFIELDS (t); n++)
        {
          falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
          if (falign > align)
            align = falign;
        }
      return align;
    }
}
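
/* Illustrative example (not part of the original source): for
   "struct { char c; double d; }" the loop above returns 8, the largest
   member alignment, while a 32-byte vector type would be capped at the
   128-bit (16-byte) maximum required by AAPCS64.  */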

/* Return 1 if *TY is a homogeneous floating-point aggregate or
   homogeneous short-vector aggregate as defined in the AAPCS64 ABI
   document; otherwise return 0.  */

static int
is_hfa_or_hva (struct type *ty)
{
  switch (TYPE_CODE (ty))
    {
    case TYPE_CODE_ARRAY:
      {
        struct type *target_ty = TYPE_TARGET_TYPE (ty);

        if (TYPE_VECTOR (ty))
          return 0;

        if (TYPE_LENGTH (ty) <= 4 /* HFA or HVA has at most 4 members.  */
            && (TYPE_CODE (target_ty) == TYPE_CODE_FLT /* HFA */
                || (TYPE_CODE (target_ty) == TYPE_CODE_ARRAY /* HVA */
                    && TYPE_VECTOR (target_ty))))
          return 1;
        break;
      }

    case TYPE_CODE_UNION:
    case TYPE_CODE_STRUCT:
      {
        /* HFA or HVA has at most four members.  */
        if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
          {
            struct type *member0_type;

            member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
            if (TYPE_CODE (member0_type) == TYPE_CODE_FLT
                || (TYPE_CODE (member0_type) == TYPE_CODE_ARRAY
                    && TYPE_VECTOR (member0_type)))
              {
                int i;

                for (i = 0; i < TYPE_NFIELDS (ty); i++)
                  {
                    struct type *member1_type;

                    member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
                    if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
                        || (TYPE_LENGTH (member0_type)
                            != TYPE_LENGTH (member1_type)))
                      return 0;
                  }
                return 1;
              }
          }
        return 0;
      }

    default:
      break;
    }

  return 0;
}
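
/* Illustrative examples (not part of the original source):
   "struct { float a, b, c; }" is an HFA (at most four members, all of
   the same floating-point type), whereas "struct { float a; double b; }"
   is not, because the member types differ.  An HVA is the analogous
   aggregate whose members are identical short vectors.  */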

/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa;

  /* Stack item vector.  */
  VEC(stack_item_t) *si;
};

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
           struct aarch64_call_info *info, struct type *type,
           struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
                                                   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
          && partial_len < X_REGISTER_SIZE
          && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
        regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum),
                        phex (regval, X_REGISTER_SIZE));
        }
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}
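
/* Illustrative example (not part of the original source): with
   info->ngrn == 0, a 12-byte structure is split by the loop above into
   an 8-byte chunk in x0 and a 4-byte chunk in x1; on big-endian targets
   the final chunk is shifted into the most significant bits of x1 by
   the sub-word adjustment.  */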

/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function, does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
           struct regcache *regcache,
           struct aarch64_call_info *info,
           int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      gdb_byte reg[V_REGISTER_SIZE];

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
         bits of V register.  */
      memcpy (reg, buf, len);
      regcache_cooked_write (regcache, regnum, reg);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum));
        }
      return 1;
    }
  info->nsrn = 8;
  return 0;
}

/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
               struct value *arg)
{
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = aarch64_type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    {
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
                    info->nsaa);
    }

  item.len = len;
  item.data = buf;
  VEC_safe_push (stack_item_t, info->si, &item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      VEC_safe_push (stack_item_t, info->si, &item);
      info->nsaa += pad;
    }
}
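
/* Illustrative example (not part of the original source): pushing a
   single 1-byte argument with NSAA == 0 queues the data item (len 1)
   plus a 7-byte padding item, since align_up (1, 8) == 8; the NSAA
   advances by 8 in total.  */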

/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available, then onto
   the stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
                    struct aarch64_call_info *info, struct type *type,
                    struct value *arg)
{
  int len = TYPE_LENGTH (type);
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare.  */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, arg);
    }
}

/* Pass a value in a V register, or on the stack if insufficient V
   registers are available.  */

static void
pass_in_v_or_stack (struct gdbarch *gdbarch,
                    struct regcache *regcache,
                    struct aarch64_call_info *info,
                    struct type *type,
                    struct value *arg)
{
  if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (type),
                  value_contents (arg)))
    pass_on_stack (info, type, arg);
}

/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                         struct regcache *regcache, CORE_ADDR bp_addr,
                         int nargs,
                         struct value **args, CORE_ADDR sp, int struct_return,
                         CORE_ADDR struct_addr)
{
  int nstack = 0;
  int argnum;
  int x_argreg;
  int v_argreg;
  struct aarch64_call_info info;
  struct type *func_type;
  struct type *return_type;
  int lang_struct_return;

  memset (&info, 0, sizeof (info));

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two stage process.  The language
     handler is consulted first and may decide to return in memory (e.g.
     class with copy constructor returned by value), this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.

     This is slightly awkward, ideally the flag "lang_struct_return"
     would be passed to the targets implementation of push_dummy_call.
     Rather than change the target interface we call the language code
     directly ourselves.  */

  func_type = check_typedef (value_type (function));

  /* Dereference function pointer types.  */
  if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
    func_type = TYPE_TARGET_TYPE (func_type);

  gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
              || TYPE_CODE (func_type) == TYPE_CODE_METHOD);

  /* If language_pass_by_reference () returned true we will have been
     given an additional initial argument, a hidden pointer to the
     return slot in memory.  */
  return_type = TYPE_TARGET_TYPE (func_type);
  lang_struct_return = language_pass_by_reference (return_type);

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot because
     lang_struct_return was true, lose it.  */
  if (lang_struct_return)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (struct_return || lang_struct_return)
    {
      if (aarch64_debug)
        {
          debug_printf ("struct return in %s = 0x%s\n",
                        gdbarch_register_name (gdbarch,
                                               AARCH64_STRUCT_RETURN_REGNUM),
                        paddress (gdbarch, struct_addr));
        }
      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
                                      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type;
      int len;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

      switch (TYPE_CODE (arg_type))
        {
        case TYPE_CODE_INT:
        case TYPE_CODE_BOOL:
        case TYPE_CODE_CHAR:
        case TYPE_CODE_RANGE:
        case TYPE_CODE_ENUM:
          if (len < 4)
            {
              /* Promote to 32 bit integer.  */
              if (TYPE_UNSIGNED (arg_type))
                arg_type = builtin_type (gdbarch)->builtin_uint32;
              else
                arg_type = builtin_type (gdbarch)->builtin_int32;
              arg = value_cast (arg_type, arg);
            }
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        case TYPE_CODE_COMPLEX:
          if (info.nsrn <= 6)
            {
              const bfd_byte *buf = value_contents (arg);
              struct type *target_type =
                check_typedef (TYPE_TARGET_TYPE (arg_type));

              pass_in_v (gdbarch, regcache, &info,
                         TYPE_LENGTH (target_type), buf);
              pass_in_v (gdbarch, regcache, &info,
                         TYPE_LENGTH (target_type),
                         buf + TYPE_LENGTH (target_type));
            }
          else
            {
              info.nsrn = 8;
              pass_on_stack (&info, arg_type, arg);
            }
          break;

        case TYPE_CODE_FLT:
          pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        case TYPE_CODE_STRUCT:
        case TYPE_CODE_ARRAY:
        case TYPE_CODE_UNION:
          if (is_hfa_or_hva (arg_type))
            {
              int elements = TYPE_NFIELDS (arg_type);

              /* Homogeneous Aggregates */
              if (info.nsrn + elements < 8)
                {
                  int i;

                  for (i = 0; i < elements; i++)
                    {
                      /* We know that we have sufficient registers
                         available therefore this will never fallback
                         to the stack.  */
                      struct value *field =
                        value_primitive_field (arg, 0, i, arg_type);
                      struct type *field_type =
                        check_typedef (value_type (field));

                      pass_in_v_or_stack (gdbarch, regcache, &info,
                                          field_type, field);
                    }
                }
              else
                {
                  info.nsrn = 8;
                  pass_on_stack (&info, arg_type, arg);
                }
            }
          else if (TYPE_CODE (arg_type) == TYPE_CODE_ARRAY
                   && TYPE_VECTOR (arg_type) && (len == 16 || len == 8))
            {
              /* Short vector types are passed in V registers.  */
              pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
            }
          else if (len > 16)
            {
              /* PCS B.7 Aggregates larger than 16 bytes are passed by
                 invisible reference.  */

              /* Allocate aligned storage.  */
              sp = align_down (sp - len, 16);

              /* Write the real data into the stack.  */
              write_memory (sp, value_contents (arg), len);

              /* Construct the indirection.  */
              arg_type = lookup_pointer_type (arg_type);
              arg = value_from_pointer (arg_type, sp);
              pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
            }
          else
            /* PCS C.15 / C.18 multiple values pass.  */
            pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        default:
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;
        }
    }

  /* Make sure stack retains 16 byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  while (!VEC_empty (stack_item_t, info.si))
    {
      stack_item_t *si = VEC_last (stack_item_t, info.si);

      sp -= si->len;
      if (si->data != NULL)
        write_memory (sp, si->data, si->len);
      VEC_pop (stack_item_t, info.si);
    }

  VEC_free (stack_item_t, info.si);

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}
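
/* Illustrative example (not part of the original source): for a call
   to a hypothetical "double f (int a, double b, struct big s)" with
   sizeof (struct big) > 16, the code above passes A in x0, B in v0,
   and S by invisible reference: its contents are copied below the
   initial SP and the pointer to that copy is passed in x1 (PCS B.7).  */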

/* Implement the "frame_align" gdbarch method.  */

static CORE_ADDR
aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  /* Align the stack to sixteen bytes.  */
  return sp & ~(CORE_ADDR) 15;
}

/* Return the type for an AdvSIMD Q register.  */

static struct type *
aarch64_vnq_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnq_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint128;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int128;
      append_composite_type_field (t, "s", elem);

      tdep->vnq_type = t;
    }

  return tdep->vnq_type;
}

/* Return the type for an AdvSIMD D register.  */

static struct type *
aarch64_vnd_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnd_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int64;
      append_composite_type_field (t, "s", elem);

      tdep->vnd_type = t;
    }

  return tdep->vnd_type;
}

/* Return the type for an AdvSIMD S register.  */

static struct type *
aarch64_vns_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vns_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int32;
      append_composite_type_field (t, "s", elem);

      tdep->vns_type = t;
    }

  return tdep->vns_type;
}

/* Return the type for an AdvSIMD H register.  */

static struct type *
aarch64_vnh_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnh_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int16;
      append_composite_type_field (t, "s", elem);

      tdep->vnh_type = t;
    }

  return tdep->vnh_type;
}

/* Return the type for an AdvSIMD B register.  */

static struct type *
aarch64_vnb_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnb_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int8;
      append_composite_type_field (t, "s", elem);

      tdep->vnb_type = t;
    }

  return tdep->vnb_type;
}

/* Implement the "dwarf2_reg_to_regnum" gdbarch method.  */

static int
aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
    return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;

  if (reg == AARCH64_DWARF_SP)
    return AARCH64_SP_REGNUM;

  if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
    return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;

  return -1;
}
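
/* Illustrative examples (not part of the original source), following
   the AArch64 DWARF numbering: DWARF registers 0-30 map to x0-x30,
   DWARF 31 (AARCH64_DWARF_SP) maps to SP, and DWARF 64-95 map to
   v0-v31.  Any other number is unhandled and yields -1.  */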

/* Implement the "print_insn" gdbarch method.  */

static int
aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
{
  info->symbols = NULL;
  return print_insn_aarch64 (memaddr, info);
}

/* AArch64 BRK software debug mode instruction.
   Note that AArch64 code is always little-endian.
   1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
static const gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

/* Implement the "breakpoint_from_pc" gdbarch method.  */

static const gdb_byte *
aarch64_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
                            int *lenptr)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  *lenptr = sizeof (aarch64_default_breakpoint);
  return aarch64_default_breakpoint;
}
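
/* Editorial note (not part of the original source): 0xd4200000 decodes
   as "brk #0".  Because A64 instructions are little-endian regardless
   of the target's data endianness, the byte sequence above is correct
   on both little- and big-endian targets, so breakpoint_from_pc can
   return it unconditionally.  */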

/* Extract from an array REGS containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.  */

static void
aarch64_extract_return_value (struct type *type, struct regcache *regs,
                              gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
      memcpy (valbuf, buf, len);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
           || TYPE_CODE (type) == TYPE_CODE_CHAR
           || TYPE_CODE (type) == TYPE_CODE_BOOL
           || TYPE_CODE (type) == TYPE_CODE_PTR
           || TYPE_CODE (type) == TYPE_CODE_REF
           || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
         straightforward.  Otherwise we have to play around a bit
         more.  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      ULONGEST tmp;

      while (len > 0)
        {
          /* By using store_unsigned_integer we avoid having to do
             anything special for small big-endian values.  */
          regcache_cooked_read_unsigned (regs, regno++, &tmp);
          store_unsigned_integer (valbuf,
                                  (len > X_REGISTER_SIZE
                                   ? X_REGISTER_SIZE : len), byte_order, tmp);
          len -= X_REGISTER_SIZE;
          valbuf += X_REGISTER_SIZE;
        }
    }
  else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
    {
      int regno = AARCH64_V0_REGNUM;
      bfd_byte buf[V_REGISTER_SIZE];
      struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
      int len = TYPE_LENGTH (target_type);

      regcache_cooked_read (regs, regno, buf);
      memcpy (valbuf, buf, len);
      valbuf += len;
      regcache_cooked_read (regs, regno + 1, buf);
      memcpy (valbuf, buf, len);
      valbuf += len;
    }
  else if (is_hfa_or_hva (type))
    {
      int elements = TYPE_NFIELDS (type);
      struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
      int len = TYPE_LENGTH (member_type);
      int i;

      for (i = 0; i < elements; i++)
        {
          int regno = AARCH64_V0_REGNUM + i;
          bfd_byte buf[V_REGISTER_SIZE];

          if (aarch64_debug)
            {
              debug_printf ("read HFA or HVA return value element %d from %s\n",
                            i + 1,
                            gdbarch_register_name (gdbarch, regno));
            }
          regcache_cooked_read (regs, regno, buf);

          memcpy (valbuf, buf, len);
          valbuf += len;
        }
    }
  else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
           && (TYPE_LENGTH (type) == 16 || TYPE_LENGTH (type) == 8))
    {
      /* Short vector is returned in V register.  */
      gdb_byte buf[V_REGISTER_SIZE];

      regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
      memcpy (valbuf, buf, TYPE_LENGTH (type));
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
         been stored to word-aligned memory and then loaded into
         registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte buf[X_REGISTER_SIZE];

      while (len > 0)
        {
          regcache_cooked_read (regs, regno++, buf);
          memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
          len -= X_REGISTER_SIZE;
          valbuf += X_REGISTER_SIZE;
        }
    }
}

/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  */

static int
aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  int nRc;
  enum type_code code;

  type = check_typedef (type);

  if (is_hfa_or_hva (type))
    {
      /* v0-v7 are used to return values and one register is allocated
         for one member.  However, HFA or HVA has at most four members.  */
      return 0;
    }

  if (TYPE_LENGTH (type) > 16)
    {
      /* PCS B.6 Aggregates larger than 16 bytes are passed by
         invisible reference.  */

      return 1;
    }

  return 0;
}

/* Write into appropriate registers a function return value of type
   TYPE, given in virtual format.  */

static void
aarch64_store_return_value (struct type *type, struct regcache *regs,
                            const gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
      regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
           || TYPE_CODE (type) == TYPE_CODE_CHAR
           || TYPE_CODE (type) == TYPE_CODE_BOOL
           || TYPE_CODE (type) == TYPE_CODE_PTR
           || TYPE_CODE (type) == TYPE_CODE_REF
           || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
        {
          /* Values of one word or less are zero/sign-extended and
             returned in x0.  */
          bfd_byte tmpbuf[X_REGISTER_SIZE];
          LONGEST val = unpack_long (type, valbuf);

          store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
          regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
        }
      else
        {
          /* Integral values greater than one word are stored in
             consecutive registers starting with x0.  This will always
             be a multiple of the register size.  */
          int len = TYPE_LENGTH (type);
          int regno = AARCH64_X0_REGNUM;

          while (len > 0)
            {
              regcache_cooked_write (regs, regno++, valbuf);
              len -= X_REGISTER_SIZE;
              valbuf += X_REGISTER_SIZE;
            }
        }
    }
  else if (is_hfa_or_hva (type))
    {
      int elements = TYPE_NFIELDS (type);
      struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
      int len = TYPE_LENGTH (member_type);
      int i;

      for (i = 0; i < elements; i++)
        {
          int regno = AARCH64_V0_REGNUM + i;
          bfd_byte tmpbuf[MAX_REGISTER_SIZE];

          if (aarch64_debug)
            {
              debug_printf ("write HFA or HVA return value element %d to %s\n",
                            i + 1,
                            gdbarch_register_name (gdbarch, regno));
            }

          memcpy (tmpbuf, valbuf, len);
          regcache_cooked_write (regs, regno, tmpbuf);
          valbuf += len;
        }
    }
  else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
           && (TYPE_LENGTH (type) == 8 || TYPE_LENGTH (type) == 16))
    {
      /* Short vector.  */
      gdb_byte buf[V_REGISTER_SIZE];

      memcpy (buf, valbuf, TYPE_LENGTH (type));
      regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
         been stored to word-aligned memory and then loaded into
         registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte tmpbuf[X_REGISTER_SIZE];

      while (len > 0)
        {
          memcpy (tmpbuf, valbuf,
                  len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
          regcache_cooked_write (regs, regno++, tmpbuf);
          len -= X_REGISTER_SIZE;
          valbuf += X_REGISTER_SIZE;
        }
    }
}
1839/* Implement the "return_value" gdbarch method. */
1840
1841static enum return_value_convention
1842aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
1843 struct type *valtype, struct regcache *regcache,
1844 gdb_byte *readbuf, const gdb_byte *writebuf)
1845{
1846 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1847
1848 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
1849 || TYPE_CODE (valtype) == TYPE_CODE_UNION
1850 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
1851 {
1852 if (aarch64_return_in_memory (gdbarch, valtype))
1853 {
1854 if (aarch64_debug)
b277c936 1855 debug_printf ("return value in memory\n");
07b287a0
MS
1856 return RETURN_VALUE_STRUCT_CONVENTION;
1857 }
1858 }
1859
1860 if (writebuf)
1861 aarch64_store_return_value (valtype, regcache, writebuf);
1862
1863 if (readbuf)
1864 aarch64_extract_return_value (valtype, regcache, readbuf);
1865
1866 if (aarch64_debug)
1867    debug_printf ("return value in registers\n");
1868
1869 return RETURN_VALUE_REGISTER_CONVENTION;
1870}
1871
1872/* Implement the "get_longjmp_target" gdbarch method. */
1873
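/* A sketch of the layout assumed here: jmp_buf is an array of
   jb_elt_size-byte slots, and the OS ABI handler sets tdep->jb_pc to
   the index of the slot holding the saved PC, so the PC is read from
   JB_ADDR + jb_pc * jb_elt_size.  */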
1874static int
1875aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
1876{
1877 CORE_ADDR jb_addr;
1878 gdb_byte buf[X_REGISTER_SIZE];
1879 struct gdbarch *gdbarch = get_frame_arch (frame);
1880 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1881 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1882
1883 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
1884
1885 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
1886 X_REGISTER_SIZE))
1887 return 0;
1888
1889 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
1890 return 1;
1891}
1892
1893/* Implement the "gen_return_address" gdbarch method. */
1894
1895static void
1896aarch64_gen_return_address (struct gdbarch *gdbarch,
1897 struct agent_expr *ax, struct axs_value *value,
1898 CORE_ADDR scope)
1899{
1900 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
1901 value->kind = axs_lvalue_register;
1902 value->u.reg = AARCH64_LR_REGNUM;
1903}
1904\f
1905
1906/* Return the pseudo register name corresponding to register regnum. */
1907
1908static const char *
1909aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
1910{
1911 static const char *const q_name[] =
1912 {
1913 "q0", "q1", "q2", "q3",
1914 "q4", "q5", "q6", "q7",
1915 "q8", "q9", "q10", "q11",
1916 "q12", "q13", "q14", "q15",
1917 "q16", "q17", "q18", "q19",
1918 "q20", "q21", "q22", "q23",
1919 "q24", "q25", "q26", "q27",
1920 "q28", "q29", "q30", "q31",
1921 };
1922
1923 static const char *const d_name[] =
1924 {
1925 "d0", "d1", "d2", "d3",
1926 "d4", "d5", "d6", "d7",
1927 "d8", "d9", "d10", "d11",
1928 "d12", "d13", "d14", "d15",
1929 "d16", "d17", "d18", "d19",
1930 "d20", "d21", "d22", "d23",
1931 "d24", "d25", "d26", "d27",
1932 "d28", "d29", "d30", "d31",
1933 };
1934
1935 static const char *const s_name[] =
1936 {
1937 "s0", "s1", "s2", "s3",
1938 "s4", "s5", "s6", "s7",
1939 "s8", "s9", "s10", "s11",
1940 "s12", "s13", "s14", "s15",
1941 "s16", "s17", "s18", "s19",
1942 "s20", "s21", "s22", "s23",
1943 "s24", "s25", "s26", "s27",
1944 "s28", "s29", "s30", "s31",
1945 };
1946
1947 static const char *const h_name[] =
1948 {
1949 "h0", "h1", "h2", "h3",
1950 "h4", "h5", "h6", "h7",
1951 "h8", "h9", "h10", "h11",
1952 "h12", "h13", "h14", "h15",
1953 "h16", "h17", "h18", "h19",
1954 "h20", "h21", "h22", "h23",
1955 "h24", "h25", "h26", "h27",
1956 "h28", "h29", "h30", "h31",
1957 };
1958
1959 static const char *const b_name[] =
1960 {
1961 "b0", "b1", "b2", "b3",
1962 "b4", "b5", "b6", "b7",
1963 "b8", "b9", "b10", "b11",
1964 "b12", "b13", "b14", "b15",
1965 "b16", "b17", "b18", "b19",
1966 "b20", "b21", "b22", "b23",
1967 "b24", "b25", "b26", "b27",
1968 "b28", "b29", "b30", "b31",
1969 };
1970
1971 regnum -= gdbarch_num_regs (gdbarch);
1972
1973 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
1974 return q_name[regnum - AARCH64_Q0_REGNUM];
1975
1976 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
1977 return d_name[regnum - AARCH64_D0_REGNUM];
1978
1979 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
1980 return s_name[regnum - AARCH64_S0_REGNUM];
1981
1982 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
1983 return h_name[regnum - AARCH64_H0_REGNUM];
1984
1985 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
1986 return b_name[regnum - AARCH64_B0_REGNUM];
1987
1988 internal_error (__FILE__, __LINE__,
1989 _("aarch64_pseudo_register_name: bad register number %d"),
1990 regnum);
1991}
1992
1993/* Implement the "pseudo_register_type" tdesc_arch_data method. */
1994
1995static struct type *
1996aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
1997{
1998 regnum -= gdbarch_num_regs (gdbarch);
1999
2000 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2001 return aarch64_vnq_type (gdbarch);
2002
2003 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2004 return aarch64_vnd_type (gdbarch);
2005
2006 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2007 return aarch64_vns_type (gdbarch);
2008
2009 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2010 return aarch64_vnh_type (gdbarch);
2011
2012 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2013 return aarch64_vnb_type (gdbarch);
2014
2015 internal_error (__FILE__, __LINE__,
2016 _("aarch64_pseudo_register_type: bad register number %d"),
2017 regnum);
2018}
2019
2020/* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2021
2022static int
2023aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2024 struct reggroup *group)
2025{
2026 regnum -= gdbarch_num_regs (gdbarch);
2027
2028 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2029 return group == all_reggroup || group == vector_reggroup;
2030 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2031 return (group == all_reggroup || group == vector_reggroup
2032 || group == float_reggroup);
2033 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2034 return (group == all_reggroup || group == vector_reggroup
2035 || group == float_reggroup);
2036 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2037 return group == all_reggroup || group == vector_reggroup;
2038 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2039 return group == all_reggroup || group == vector_reggroup;
2040
2041 return group == all_reggroup;
2042}
2043
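/* A note on the pseudo register reads and writes below: the Q/D/S/H/B
   pseudo registers all alias the low bytes of the corresponding raw V
   register.  For example, reading pseudo "d5" copies the low
   D_REGISTER_SIZE (8) bytes of raw register v5, and writing pseudo
   "s3" writes 4 bytes and zeroes the rest of v3, mirroring the
   architectural behaviour of scalar writes.  */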
2044/* Implement the "pseudo_register_read_value" gdbarch method. */
2045
2046static struct value *
2047aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2048 struct regcache *regcache,
2049 int regnum)
2050{
2051 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2052 struct value *result_value;
2053 gdb_byte *buf;
2054
2055 result_value = allocate_value (register_type (gdbarch, regnum));
2056 VALUE_LVAL (result_value) = lval_register;
2057 VALUE_REGNUM (result_value) = regnum;
2058 buf = value_contents_raw (result_value);
2059
2060 regnum -= gdbarch_num_regs (gdbarch);
2061
2062 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2063 {
2064 enum register_status status;
2065 unsigned v_regnum;
2066
2067 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2068 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2069 if (status != REG_VALID)
2070 mark_value_bytes_unavailable (result_value, 0,
2071 TYPE_LENGTH (value_type (result_value)));
2072 else
2073 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2074 return result_value;
2075 }
2076
2077 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2078 {
2079 enum register_status status;
2080 unsigned v_regnum;
2081
2082 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2083 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2084 if (status != REG_VALID)
2085 mark_value_bytes_unavailable (result_value, 0,
2086 TYPE_LENGTH (value_type (result_value)));
2087 else
2088 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2089 return result_value;
2090 }
2091
2092 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2093 {
2094 enum register_status status;
2095 unsigned v_regnum;
2096
2097 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2098 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2099 if (status != REG_VALID)
2100 mark_value_bytes_unavailable (result_value, 0,
2101 TYPE_LENGTH (value_type (result_value)));
2102 else
2103 memcpy (buf, reg_buf, S_REGISTER_SIZE);
2104 return result_value;
2105 }
2106
2107 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2108 {
2109 enum register_status status;
2110 unsigned v_regnum;
2111
2112 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2113 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2114 if (status != REG_VALID)
2115 mark_value_bytes_unavailable (result_value, 0,
2116 TYPE_LENGTH (value_type (result_value)));
2117 else
2118 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2119 return result_value;
2120 }
2121
2122 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2123 {
2124 enum register_status status;
2125 unsigned v_regnum;
2126
2127 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2128 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2129 if (status != REG_VALID)
2130 mark_value_bytes_unavailable (result_value, 0,
2131 TYPE_LENGTH (value_type (result_value)));
2132 else
2133 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2134 return result_value;
2135 }
2136
2137  gdb_assert_not_reached ("regnum out of bounds");
2138}
2139
2140/* Implement the "pseudo_register_write" gdbarch method. */
2141
2142static void
2143aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2144 int regnum, const gdb_byte *buf)
2145{
2146 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2147
2148  /* Ensure the register buffer is zero.  We want GDB writes to the
2149     various 'scalar' pseudo registers to behave like architectural
2150     writes: register-width bytes are written and the remainder is set
2151     to zero.  */
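  /* For example, a 4-byte write to pseudo "s0" stores those 4 bytes to
     the low part of raw v0 and clears the remaining 12 bytes.  */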
2152 memset (reg_buf, 0, sizeof (reg_buf));
2153
2154 regnum -= gdbarch_num_regs (gdbarch);
2155
2156 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2157 {
2158 /* pseudo Q registers */
2159 unsigned v_regnum;
2160
2161 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2162 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2163 regcache_raw_write (regcache, v_regnum, reg_buf);
2164 return;
2165 }
2166
2167 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2168 {
2169 /* pseudo D registers */
2170 unsigned v_regnum;
2171
2172 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2173 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2174 regcache_raw_write (regcache, v_regnum, reg_buf);
2175 return;
2176 }
2177
2178 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2179 {
2180 unsigned v_regnum;
2181
2182 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2183 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2184 regcache_raw_write (regcache, v_regnum, reg_buf);
2185 return;
2186 }
2187
2188 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2189 {
2190 /* pseudo H registers */
2191 unsigned v_regnum;
2192
2193 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2194 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2195 regcache_raw_write (regcache, v_regnum, reg_buf);
2196 return;
2197 }
2198
2199 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2200 {
2201 /* pseudo B registers */
2202 unsigned v_regnum;
2203
2204 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2205 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2206 regcache_raw_write (regcache, v_regnum, reg_buf);
2207 return;
2208 }
2209
2210  gdb_assert_not_reached ("regnum out of bounds");
2211}
2212
2213/* Callback function for user_reg_add.  */
2214
2215static struct value *
2216value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2217{
2218  const int *reg_p = (const int *) baton;
2219
2220 return value_of_register (*reg_p, frame);
2221}
2222\f
2223
2224/* Implement the "software_single_step" gdbarch method, needed to
2225 single step through atomic sequences on AArch64. */
2226
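/* A typical atomic sequence looks like this (a sketch, not taken from
   any particular program):

	loop:
	  ldaxr  w1, [x0]	; load exclusive opens the sequence
	  add    w1, w1, #1
	  stlxr  w2, w1, [x0]	; store exclusive closes it
	  cbnz   w2, loop	; retry if the store lost exclusivity

   Trapping in the middle of the sequence would clear the exclusive
   monitor, and the store exclusive could then never succeed, so the
   code below places breakpoints after the closing store exclusive and
   at the destination of any conditional branch inside the sequence.  */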
2227static int
2228aarch64_software_single_step (struct frame_info *frame)
2229{
2230 struct gdbarch *gdbarch = get_frame_arch (frame);
2231 struct address_space *aspace = get_frame_address_space (frame);
2232 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2233 const int insn_size = 4;
2234 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2235 CORE_ADDR pc = get_frame_pc (frame);
2236 CORE_ADDR breaks[2] = { -1, -1 };
2237 CORE_ADDR loc = pc;
2238 CORE_ADDR closing_insn = 0;
2239 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2240 byte_order_for_code);
2241 int index;
2242 int insn_count;
2243 int bc_insn_count = 0; /* Conditional branch instruction count. */
2244 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2245  aarch64_inst inst;
2246
2247  if (aarch64_decode_insn (insn, &inst, 1) != 0)
2248    return 0;
2249
2250 /* Look for a Load Exclusive instruction which begins the sequence. */
2251  if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2252    return 0;
2253
2254  for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2255    {
2256 loc += insn_size;
2257 insn = read_memory_unsigned_integer (loc, insn_size,
2258 byte_order_for_code);
2259
2260      if (aarch64_decode_insn (insn, &inst, 1) != 0)
2261	return 0;
2262      /* Check if the instruction is a conditional branch.  */
2263      if (inst.opcode->iclass == condbranch)
2264	{
2265	  gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2266
2267 if (bc_insn_count >= 1)
2268 return 0;
2269
2270 /* It is, so we'll try to set a breakpoint at the destination. */
2271	  breaks[1] = loc + inst.operands[0].imm.value;
2272
2273 bc_insn_count++;
2274 last_breakpoint++;
2275 }
2276
2277 /* Look for the Store Exclusive which closes the atomic sequence. */
2278      if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2279 {
2280 closing_insn = loc;
2281 break;
2282 }
2283 }
2284
2285 /* We didn't find a closing Store Exclusive instruction, fall back. */
2286 if (!closing_insn)
2287 return 0;
2288
2289 /* Insert breakpoint after the end of the atomic sequence. */
2290 breaks[0] = loc + insn_size;
2291
2292 /* Check for duplicated breakpoints, and also check that the second
2293 breakpoint is not within the atomic sequence. */
2294 if (last_breakpoint
2295 && (breaks[1] == breaks[0]
2296 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2297 last_breakpoint = 0;
2298
2299 /* Insert the breakpoint at the end of the sequence, and one at the
2300 destination of the conditional branch, if it exists. */
2301 for (index = 0; index <= last_breakpoint; index++)
2302 insert_single_step_breakpoint (gdbarch, aspace, breaks[index]);
2303
2304 return 1;
2305}
2306
2307struct displaced_step_closure
2308{
2309  /* Nonzero if a conditional instruction, such as B.COND or TBZ,
2310     is being displaced stepped.  */
2311 int cond;
2312
2313 /* PC adjustment offset after displaced stepping. */
2314 int32_t pc_adjust;
2315};
2316
2317/* Data when visiting instructions for displaced stepping. */
2318
2319struct aarch64_displaced_step_data
2320{
2321 struct aarch64_insn_data base;
2322
2323  /* The address at which the instruction will be executed.  */
2324 CORE_ADDR new_addr;
2325 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2326 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2327 /* Number of instructions in INSN_BUF. */
2328 unsigned insn_count;
2329 /* Registers when doing displaced stepping. */
2330 struct regcache *regs;
2331
2332 struct displaced_step_closure *dsc;
2333};
2334
2335/* Implementation of aarch64_insn_visitor method "b". */
2336
2337static void
2338aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2339 struct aarch64_insn_data *data)
2340{
2341 struct aarch64_displaced_step_data *dsd
2342 = (struct aarch64_displaced_step_data *) data;
2343 int32_t new_offset = data->insn_addr - dsd->new_addr + offset;
2344
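  /* The original branch target is INSN_ADDR + OFFSET; executing from
     NEW_ADDR must reach the same target, so the relocated offset is
     (INSN_ADDR - NEW_ADDR) + OFFSET.  */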
2345 if (can_encode_int32 (new_offset, 28))
2346 {
2347 /* Emit B rather than BL, because executing BL on a new address
2348 will get the wrong address into LR. In order to avoid this,
2349 we emit B, and update LR if the instruction is BL. */
2350 emit_b (dsd->insn_buf, 0, new_offset);
2351 dsd->insn_count++;
2352 }
2353 else
2354 {
2355 /* Write NOP. */
2356 emit_nop (dsd->insn_buf);
2357 dsd->insn_count++;
2358 dsd->dsc->pc_adjust = offset;
2359 }
2360
2361 if (is_bl)
2362 {
2363 /* Update LR. */
2364 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2365 data->insn_addr + 4);
2366 }
2367}
2368
2369/* Implementation of aarch64_insn_visitor method "b_cond". */
2370
2371static void
2372aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2373 struct aarch64_insn_data *data)
2374{
2375 struct aarch64_displaced_step_data *dsd
2376 = (struct aarch64_displaced_step_data *) data;
2377 int32_t new_offset = data->insn_addr - dsd->new_addr + offset;
2378
2379  /* GDB has to fix up the PC after displaced stepping this instruction
2380     differently depending on whether the condition is true or false.
2381     Instead of checking COND against the condition flags, we can emit
2382     the following instructions, and GDB can tell how to fix up the PC
2383     from the resulting PC value.
2384
2385 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2386 INSN1 ;
2387 TAKEN:
2388 INSN2
2389 */
2390
2391 emit_bcond (dsd->insn_buf, cond, 8);
2392 dsd->dsc->cond = 1;
2393 dsd->dsc->pc_adjust = offset;
2394 dsd->insn_count = 1;
2395}
2396
2397/* Construct an aarch64_register operand dynamically.  If we know the
2398   register statically, we should make it a global as above instead of
2399   using this helper function.  */
2400
2401static struct aarch64_register
2402aarch64_register (unsigned num, int is64)
2403{
2404 return (struct aarch64_register) { num, is64 };
2405}
2406
2407/* Implementation of aarch64_insn_visitor method "cb". */
2408
2409static void
2410aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2411 const unsigned rn, int is64,
2412 struct aarch64_insn_data *data)
2413{
2414 struct aarch64_displaced_step_data *dsd
2415 = (struct aarch64_displaced_step_data *) data;
2416 int32_t new_offset = data->insn_addr - dsd->new_addr + offset;
2417
2418 /* The offset is out of range for a compare and branch
2419 instruction. We can use the following instructions instead:
2420
2421 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2422 INSN1 ;
2423 TAKEN:
2424 INSN2
2425 */
2426 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2427 dsd->insn_count = 1;
2428 dsd->dsc->cond = 1;
2429 dsd->dsc->pc_adjust = offset;
2430}
2431
2432/* Implementation of aarch64_insn_visitor method "tb". */
2433
2434static void
2435aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2436 const unsigned rt, unsigned bit,
2437 struct aarch64_insn_data *data)
2438{
2439 struct aarch64_displaced_step_data *dsd
2440 = (struct aarch64_displaced_step_data *) data;
2441 int32_t new_offset = data->insn_addr - dsd->new_addr + offset;
2442
2443 /* The offset is out of range for a test bit and branch
2444     instruction.  We can use the following instructions instead:
2445
2446 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2447 INSN1 ;
2448 TAKEN:
2449 INSN2
2450
2451 */
2452 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2453 dsd->insn_count = 1;
2454 dsd->dsc->cond = 1;
2455 dsd->dsc->pc_adjust = offset;
2456}
2457
2458/* Implementation of aarch64_insn_visitor method "adr". */
2459
2460static void
2461aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2462 const int is_adrp, struct aarch64_insn_data *data)
2463{
2464 struct aarch64_displaced_step_data *dsd
2465 = (struct aarch64_displaced_step_data *) data;
2466 /* We know exactly the address the ADR{P,} instruction will compute.
2467 We can just write it to the destination register. */
2468 CORE_ADDR address = data->insn_addr + offset;
2469
2470 if (is_adrp)
2471 {
2472 /* Clear the lower 12 bits of the offset to get the 4K page. */
2473 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2474 address & ~0xfff);
2475 }
2476 else
2477 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2478 address);
2479
2480 dsd->dsc->pc_adjust = 4;
2481 emit_nop (dsd->insn_buf);
2482 dsd->insn_count = 1;
2483}
2484
2485/* Implementation of aarch64_insn_visitor method "ldr_literal". */
2486
2487static void
2488aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2489 const unsigned rt, const int is64,
2490 struct aarch64_insn_data *data)
2491{
2492 struct aarch64_displaced_step_data *dsd
2493 = (struct aarch64_displaced_step_data *) data;
2494 CORE_ADDR address = data->insn_addr + offset;
2495 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2496
2497 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2498 address);
2499
2500 if (is_sw)
2501 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2502 aarch64_register (rt, 1), zero);
2503 else
2504 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2505 aarch64_register (rt, 1), zero);
2506
2507 dsd->dsc->pc_adjust = 4;
2508}
2509
2510/* Implementation of aarch64_insn_visitor method "others". */
2511
2512static void
2513aarch64_displaced_step_others (const uint32_t insn,
2514 struct aarch64_insn_data *data)
2515{
2516 struct aarch64_displaced_step_data *dsd
2517 = (struct aarch64_displaced_step_data *) data;
2518
2519  aarch64_emit_insn (dsd->insn_buf, insn);
2520 dsd->insn_count = 1;
2521
2522 if ((insn & 0xfffffc1f) == 0xd65f0000)
2523 {
2524 /* RET */
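      /* Executing the relocated RET already set the PC from its
	 register operand; a pc_adjust of 0 makes the fixup phase leave
	 that PC alone.  */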
2525 dsd->dsc->pc_adjust = 0;
2526 }
2527 else
2528 dsd->dsc->pc_adjust = 4;
2529}
2530
2531static const struct aarch64_insn_visitor visitor =
2532{
2533 aarch64_displaced_step_b,
2534 aarch64_displaced_step_b_cond,
2535 aarch64_displaced_step_cb,
2536 aarch64_displaced_step_tb,
2537 aarch64_displaced_step_adr,
2538 aarch64_displaced_step_ldr_literal,
2539 aarch64_displaced_step_others,
2540};
2541
2542/* Implement the "displaced_step_copy_insn" gdbarch method. */
2543
2544struct displaced_step_closure *
2545aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2546 CORE_ADDR from, CORE_ADDR to,
2547 struct regcache *regs)
2548{
2549 struct displaced_step_closure *dsc = NULL;
2550 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2551 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2552 struct aarch64_displaced_step_data dsd;
2553  aarch64_inst inst;
2554
2555  if (aarch64_decode_insn (insn, &inst, 1) != 0)
2556    return NULL;
2557
2558  /* Look for a Load Exclusive instruction which begins the sequence.  */
2559  if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2560 {
2561 /* We can't displaced step atomic sequences. */
2562 return NULL;
2563 }
2564
2565 dsc = XCNEW (struct displaced_step_closure);
2566 dsd.base.insn_addr = from;
2567 dsd.new_addr = to;
2568 dsd.regs = regs;
2569 dsd.dsc = dsc;
2570  dsd.insn_count = 0;
2571 aarch64_relocate_instruction (insn, &visitor,
2572 (struct aarch64_insn_data *) &dsd);
2573 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
2574
2575 if (dsd.insn_count != 0)
2576 {
2577 int i;
2578
2579 /* Instruction can be relocated to scratch pad. Copy
2580 relocated instruction(s) there. */
2581 for (i = 0; i < dsd.insn_count; i++)
2582 {
2583 if (debug_displaced)
2584 {
2585 debug_printf ("displaced: writing insn ");
2586 debug_printf ("%.8x", dsd.insn_buf[i]);
2587 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
2588 }
2589 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
2590 (ULONGEST) dsd.insn_buf[i]);
2591 }
2592 }
2593 else
2594 {
2595 xfree (dsc);
2596 dsc = NULL;
2597 }
2598
2599 return dsc;
2600}
2601
2602/* Implement the "displaced_step_fixup" gdbarch method. */
2603
2604void
2605aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
2606 struct displaced_step_closure *dsc,
2607 CORE_ADDR from, CORE_ADDR to,
2608 struct regcache *regs)
2609{
2610 if (dsc->cond)
2611 {
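      /* The scratch pad holds a single conditional branch (B.COND,
	 CBZ/CBNZ or TBZ/TBNZ) with target +8, emitted by the visitor
	 methods above, so after executing it the PC is TO + 8 when the
	 condition held and TO + 4 when it did not.  */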
2612 ULONGEST pc;
2613
2614 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
2615 if (pc - to == 8)
2616 {
2617 /* Condition is true. */
2618 }
2619 else if (pc - to == 4)
2620 {
2621 /* Condition is false. */
2622 dsc->pc_adjust = 4;
2623 }
2624 else
2625 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2626 }
2627
2628 if (dsc->pc_adjust != 0)
2629 {
2630 if (debug_displaced)
2631 {
2632 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2633 paddress (gdbarch, from), dsc->pc_adjust);
2634 }
2635 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
2636 from + dsc->pc_adjust);
2637 }
2638}
2639
2640/* Implement the "displaced_step_hw_singlestep" gdbarch method. */
2641
2642int
2643aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
2644 struct displaced_step_closure *closure)
2645{
2646 return 1;
2647}
2648
2649/* Initialize the current architecture based on INFO. If possible,
2650 re-use an architecture from ARCHES, which is a list of
2651 architectures already created during this debugging session.
2652
2653 Called e.g. at program startup, when reading a core file, and when
2654 reading a binary file. */
2655
2656static struct gdbarch *
2657aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2658{
2659 struct gdbarch_tdep *tdep;
2660 struct gdbarch *gdbarch;
2661 struct gdbarch_list *best_arch;
2662 struct tdesc_arch_data *tdesc_data = NULL;
2663 const struct target_desc *tdesc = info.target_desc;
2664 int i;
2665 int have_fpa_registers = 1;
2666 int valid_p = 1;
2667 const struct tdesc_feature *feature;
2668 int num_regs = 0;
2669 int num_pseudo_regs = 0;
2670
2671 /* Ensure we always have a target descriptor. */
2672 if (!tdesc_has_registers (tdesc))
2673 tdesc = tdesc_aarch64;
2674
2675 gdb_assert (tdesc);
2676
2677 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2678
2679 if (feature == NULL)
2680 return NULL;
2681
2682 tdesc_data = tdesc_data_alloc ();
2683
2684 /* Validate the descriptor provides the mandatory core R registers
2685 and allocate their numbers. */
2686 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
2687 valid_p &=
2688 tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
2689 aarch64_r_register_names[i]);
2690
2691 num_regs = AARCH64_X0_REGNUM + i;
2692
2693 /* Look for the V registers. */
2694 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2695 if (feature)
2696 {
2697 /* Validate the descriptor provides the mandatory V registers
2698 and allocate their numbers. */
2699 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
2700 valid_p &=
2701 tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
2702 aarch64_v_register_names[i]);
2703
2704 num_regs = AARCH64_V0_REGNUM + i;
2705
2706 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
2707 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
2708 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
2709 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
2710 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
2711 }
2712
2713 if (!valid_p)
2714 {
2715 tdesc_data_cleanup (tdesc_data);
2716 return NULL;
2717 }
2718
2719 /* AArch64 code is always little-endian. */
2720 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
2721
2722 /* If there is already a candidate, use it. */
2723 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
2724 best_arch != NULL;
2725 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
2726 {
2727 /* Found a match. */
2728 break;
2729 }
2730
2731 if (best_arch != NULL)
2732 {
2733 if (tdesc_data != NULL)
2734 tdesc_data_cleanup (tdesc_data);
2735 return best_arch->gdbarch;
2736 }
2737
2738  tdep = XCNEW (struct gdbarch_tdep);
2739 gdbarch = gdbarch_alloc (&info, tdep);
2740
2741 /* This should be low enough for everything. */
2742 tdep->lowest_pc = 0x20;
2743 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
2744 tdep->jb_elt_size = 8;
2745
2746 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
2747 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
2748
2749 /* Frame handling. */
2750 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
2751 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
2752 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
2753
2754 /* Advance PC across function entry code. */
2755 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
2756
2757 /* The stack grows downward. */
2758 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2759
2760 /* Breakpoint manipulation. */
2761 set_gdbarch_breakpoint_from_pc (gdbarch, aarch64_breakpoint_from_pc);
2762  set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
2763  set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
2764
2765 /* Information about registers, etc. */
2766 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
2767 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
2768 set_gdbarch_num_regs (gdbarch, num_regs);
2769
2770 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
2771 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
2772 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
2773 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
2774 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
2775 set_tdesc_pseudo_register_reggroup_p (gdbarch,
2776 aarch64_pseudo_register_reggroup_p);
2777
2778 /* ABI */
2779 set_gdbarch_short_bit (gdbarch, 16);
2780 set_gdbarch_int_bit (gdbarch, 32);
2781 set_gdbarch_float_bit (gdbarch, 32);
2782 set_gdbarch_double_bit (gdbarch, 64);
2783 set_gdbarch_long_double_bit (gdbarch, 128);
2784 set_gdbarch_long_bit (gdbarch, 64);
2785 set_gdbarch_long_long_bit (gdbarch, 64);
2786 set_gdbarch_ptr_bit (gdbarch, 64);
2787 set_gdbarch_char_signed (gdbarch, 0);
2788 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2789 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2790 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
2791
2792 /* Internal <-> external register number maps. */
2793 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
2794
2795 /* Returning results. */
2796 set_gdbarch_return_value (gdbarch, aarch64_return_value);
2797
2798 /* Disassembly. */
2799 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
2800
2801 /* Virtual tables. */
2802 set_gdbarch_vbit_in_delta (gdbarch, 1);
2803
2804 /* Hook in the ABI-specific overrides, if they have been registered. */
2805 info.target_desc = tdesc;
2806 info.tdep_info = (void *) tdesc_data;
2807 gdbarch_init_osabi (info, gdbarch);
2808
2809 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
2810
2811 /* Add some default predicates. */
2812 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
2813 dwarf2_append_unwinders (gdbarch);
2814 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
2815
2816 frame_base_set_default (gdbarch, &aarch64_normal_base);
2817
2818 /* Now we have tuned the configuration, set a few final things,
2819 based on what the OS ABI has told us. */
2820
2821 if (tdep->jb_pc >= 0)
2822 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
2823
2824  set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
2825
2826 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
2827
2828 /* Add standard register aliases. */
2829 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
2830 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
2831 value_of_aarch64_user_reg,
2832 &aarch64_register_aliases[i].regnum);
2833
2834 return gdbarch;
2835}
2836
2837static void
2838aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
2839{
2840 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2841
2842 if (tdep == NULL)
2843 return;
2844
2845 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
2846 paddress (gdbarch, tdep->lowest_pc));
2847}
2848
2849/* Suppress warning from -Wmissing-prototypes. */
2850extern initialize_file_ftype _initialize_aarch64_tdep;
2851
2852void
2853_initialize_aarch64_tdep (void)
2854{
2855 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
2856 aarch64_dump_tdep);
2857
2858 initialize_tdesc_aarch64 ();
2859
2860 /* Debug this file's internals. */
2861 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
2862Set AArch64 debugging."), _("\
2863Show AArch64 debugging."), _("\
2864When on, AArch64 specific debugging is enabled."),
2865 NULL,
2866 show_aarch64_debug,
2867 &setdebuglist, &showdebuglist);
2868}
2869
2870/* AArch64 process record-replay related structures, defines etc.  */
2871
2872#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
2873 do \
2874 { \
2875 unsigned int reg_len = LENGTH; \
2876 if (reg_len) \
2877 { \
2878 REGS = XNEWVEC (uint32_t, reg_len); \
2879 memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
2880 } \
2881 } \
2882 while (0)
2883
2884#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
2885 do \
2886 { \
2887 unsigned int mem_len = LENGTH; \
2888 if (mem_len) \
2889 { \
2890 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
2891 memcpy(&MEMS->len, &RECORD_BUF[0], \
2892 sizeof(struct aarch64_mem_r) * LENGTH); \
2893 } \
2894 } \
2895 while (0)
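/* Typical use in the record handlers below (a sketch): collect the
   numbers of the registers an insn writes into a local buffer, then
   let REG_ALLOC hand a heap copy to the record structure:

     uint32_t record_buf[4];
     record_buf[0] = reg_rd;
     aarch64_insn_r->reg_rec_count = 1;
     REG_ALLOC (aarch64_insn_r->aarch64_regs,
		aarch64_insn_r->reg_rec_count, record_buf);
*/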
2896
2897/* AArch64 record/replay structures and enumerations. */
2898
2899struct aarch64_mem_r
2900{
2901 uint64_t len; /* Record length. */
2902 uint64_t addr; /* Memory address. */
2903};
2904
2905enum aarch64_record_result
2906{
2907 AARCH64_RECORD_SUCCESS,
2908 AARCH64_RECORD_FAILURE,
2909 AARCH64_RECORD_UNSUPPORTED,
2910 AARCH64_RECORD_UNKNOWN
2911};
2912
2913typedef struct insn_decode_record_t
2914{
2915 struct gdbarch *gdbarch;
2916 struct regcache *regcache;
2917 CORE_ADDR this_addr; /* Address of insn to be recorded. */
2918 uint32_t aarch64_insn; /* Insn to be recorded. */
2919 uint32_t mem_rec_count; /* Count of memory records. */
2920 uint32_t reg_rec_count; /* Count of register records. */
2921 uint32_t *aarch64_regs; /* Registers to be recorded. */
2922 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
2923} insn_decode_record;
2924
2925/* Record handler for data processing - register instructions. */
2926
2927static unsigned int
2928aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
2929{
2930 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
2931 uint32_t record_buf[4];
2932
2933 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
2934 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
2935 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
2936
2937 if (!bit (aarch64_insn_r->aarch64_insn, 28))
2938 {
2939 uint8_t setflags;
2940
2941 /* Logical (shifted register). */
2942 if (insn_bits24_27 == 0x0a)
2943 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
2944 /* Add/subtract. */
2945 else if (insn_bits24_27 == 0x0b)
2946 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
2947 else
2948 return AARCH64_RECORD_UNKNOWN;
2949
2950 record_buf[0] = reg_rd;
2951 aarch64_insn_r->reg_rec_count = 1;
2952 if (setflags)
2953 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
2954 }
2955 else
2956 {
2957 if (insn_bits24_27 == 0x0b)
2958 {
2959 /* Data-processing (3 source). */
2960 record_buf[0] = reg_rd;
2961 aarch64_insn_r->reg_rec_count = 1;
2962 }
2963 else if (insn_bits24_27 == 0x0a)
2964 {
2965 if (insn_bits21_23 == 0x00)
2966 {
2967 /* Add/subtract (with carry). */
2968 record_buf[0] = reg_rd;
2969 aarch64_insn_r->reg_rec_count = 1;
2970 if (bit (aarch64_insn_r->aarch64_insn, 29))
2971 {
2972 record_buf[1] = AARCH64_CPSR_REGNUM;
2973 aarch64_insn_r->reg_rec_count = 2;
2974 }
2975 }
2976 else if (insn_bits21_23 == 0x02)
2977 {
2978 /* Conditional compare (register) and conditional compare
2979 (immediate) instructions. */
2980 record_buf[0] = AARCH64_CPSR_REGNUM;
2981 aarch64_insn_r->reg_rec_count = 1;
2982 }
2983 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
2984 {
2985	      /* Conditional select.  */
2986 /* Data-processing (2 source). */
2987 /* Data-processing (1 source). */
2988 record_buf[0] = reg_rd;
2989 aarch64_insn_r->reg_rec_count = 1;
2990 }
2991 else
2992 return AARCH64_RECORD_UNKNOWN;
2993 }
2994 }
2995
2996 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
2997 record_buf);
2998 return AARCH64_RECORD_SUCCESS;
2999}
3000
3001/* Record handler for data processing - immediate instructions. */
3002
3003static unsigned int
3004aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3005{
3006 uint8_t reg_rd, insn_bit28, insn_bit23, insn_bits24_27, setflags;
3007 uint32_t record_buf[4];
3008
3009 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3010 insn_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3011 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3012 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3013
3014 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3015 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3016 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3017 {
3018 record_buf[0] = reg_rd;
3019 aarch64_insn_r->reg_rec_count = 1;
3020 }
3021 else if (insn_bits24_27 == 0x01)
3022 {
3023 /* Add/Subtract (immediate). */
3024 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3025 record_buf[0] = reg_rd;
3026 aarch64_insn_r->reg_rec_count = 1;
3027 if (setflags)
3028 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3029 }
3030 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3031 {
3032 /* Logical (immediate). */
3033 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3034 record_buf[0] = reg_rd;
3035 aarch64_insn_r->reg_rec_count = 1;
3036 if (setflags)
3037 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3038 }
3039 else
3040 return AARCH64_RECORD_UNKNOWN;
3041
3042 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3043 record_buf);
3044 return AARCH64_RECORD_SUCCESS;
3045}
3046
3047/* Record handler for branch, exception generation and system instructions. */
3048
3049static unsigned int
3050aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3051{
3052 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3053 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3054 uint32_t record_buf[4];
3055
3056 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3057 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3058 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3059
3060 if (insn_bits28_31 == 0x0d)
3061 {
3062 /* Exception generation instructions. */
3063 if (insn_bits24_27 == 0x04)
3064 {
3065	  if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3066	      && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3067	      && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3068 {
3069 ULONGEST svc_number;
3070
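	      /* In the AArch64 Linux ABI the syscall number is passed
		 in x8, which is raw register 8 here.  */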
3071 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3072 &svc_number);
3073 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3074 svc_number);
3075 }
3076 else
3077 return AARCH64_RECORD_UNSUPPORTED;
3078 }
3079 /* System instructions. */
3080 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3081 {
3082 uint32_t reg_rt, reg_crn;
3083
3084 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3085 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3086
3087 /* Record rt in case of sysl and mrs instructions. */
3088 if (bit (aarch64_insn_r->aarch64_insn, 21))
3089 {
3090 record_buf[0] = reg_rt;
3091 aarch64_insn_r->reg_rec_count = 1;
3092 }
3093 /* Record cpsr for hint and msr(immediate) instructions. */
3094 else if (reg_crn == 0x02 || reg_crn == 0x04)
3095 {
3096 record_buf[0] = AARCH64_CPSR_REGNUM;
3097 aarch64_insn_r->reg_rec_count = 1;
3098 }
3099 }
3100 /* Unconditional branch (register). */
3101      else if ((insn_bits24_27 & 0x0e) == 0x06)
3102 {
3103 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3104 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3105 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3106 }
3107 else
3108 return AARCH64_RECORD_UNKNOWN;
3109 }
3110 /* Unconditional branch (immediate). */
3111 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3112 {
3113 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3114 if (bit (aarch64_insn_r->aarch64_insn, 31))
3115 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3116 }
3117 else
3118 /* Compare & branch (immediate), Test & branch (immediate) and
3119 Conditional branch (immediate). */
3120 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3121
3122 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3123 record_buf);
3124 return AARCH64_RECORD_SUCCESS;
3125}
3126
3127/* Record handler for advanced SIMD load and store instructions. */
3128
3129static unsigned int
3130aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3131{
3132 CORE_ADDR address;
3133 uint64_t addr_offset = 0;
3134 uint32_t record_buf[24];
3135 uint64_t record_buf_mem[24];
3136 uint32_t reg_rn, reg_rt;
3137 uint32_t reg_index = 0, mem_index = 0;
3138 uint8_t opcode_bits, size_bits;
3139
3140 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3141 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3142 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3143 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3144 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3145
3146 if (record_debug)
3147    debug_printf ("Process record: Advanced SIMD load/store\n");
3148
3149 /* Load/store single structure. */
3150 if (bit (aarch64_insn_r->aarch64_insn, 24))
3151 {
3152 uint8_t sindex, scale, selem, esize, replicate = 0;
3153 scale = opcode_bits >> 2;
3154 selem = ((opcode_bits & 0x02) |
3155 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3156 switch (scale)
3157 {
3158 case 1:
3159 if (size_bits & 0x01)
3160 return AARCH64_RECORD_UNKNOWN;
3161 break;
3162 case 2:
3163 if ((size_bits >> 1) & 0x01)
3164 return AARCH64_RECORD_UNKNOWN;
3165 if (size_bits & 0x01)
3166 {
3167 if (!((opcode_bits >> 1) & 0x01))
3168 scale = 3;
3169 else
3170 return AARCH64_RECORD_UNKNOWN;
3171 }
3172 break;
3173 case 3:
3174 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3175 {
3176 scale = size_bits;
3177 replicate = 1;
3178 break;
3179 }
3180 else
3181 return AARCH64_RECORD_UNKNOWN;
3182 default:
3183 break;
3184 }
3185 esize = 8 << scale;
3186 if (replicate)
3187 for (sindex = 0; sindex < selem; sindex++)
3188 {
3189 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3190 reg_rt = (reg_rt + 1) % 32;
3191 }
3192 else
3193 {
3194 for (sindex = 0; sindex < selem; sindex++)
3195 if (bit (aarch64_insn_r->aarch64_insn, 22))
3196 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3197 else
3198 {
3199 record_buf_mem[mem_index++] = esize / 8;
3200 record_buf_mem[mem_index++] = address + addr_offset;
3201 }
3202 addr_offset = addr_offset + (esize / 8);
3203 reg_rt = (reg_rt + 1) % 32;
3204 }
3205 }
3206 /* Load/store multiple structure. */
3207 else
3208 {
3209 uint8_t selem, esize, rpt, elements;
3210 uint8_t eindex, rindex;
3211
3212 esize = 8 << size_bits;
3213 if (bit (aarch64_insn_r->aarch64_insn, 30))
3214 elements = 128 / esize;
3215 else
3216 elements = 64 / esize;
3217
3218 switch (opcode_bits)
3219 {
3220	/* LD/ST4 (4 Registers).  */
3221 case 0:
3222 rpt = 1;
3223 selem = 4;
3224 break;
3225	/* LD/ST1 (4 Registers).  */
3226 case 2:
3227 rpt = 4;
3228 selem = 1;
3229 break;
3230	/* LD/ST3 (3 Registers).  */
3231 case 4:
3232 rpt = 1;
3233 selem = 3;
3234 break;
3235	/* LD/ST1 (3 Registers).  */
3236 case 6:
3237 rpt = 3;
3238 selem = 1;
3239 break;
3240	/* LD/ST1 (1 Register).  */
3241 case 7:
3242 rpt = 1;
3243 selem = 1;
3244 break;
3245	/* LD/ST2 (2 Registers).  */
3246 case 8:
3247 rpt = 1;
3248 selem = 2;
3249 break;
3250	/* LD/ST1 (2 Registers).  */
3251 case 10:
3252 rpt = 2;
3253 selem = 1;
3254 break;
3255 default:
3256 return AARCH64_RECORD_UNSUPPORTED;
3257 break;
3258 }
3259 for (rindex = 0; rindex < rpt; rindex++)
3260 for (eindex = 0; eindex < elements; eindex++)
3261 {
3262 uint8_t reg_tt, sindex;
3263 reg_tt = (reg_rt + rindex) % 32;
3264 for (sindex = 0; sindex < selem; sindex++)
3265 {
3266 if (bit (aarch64_insn_r->aarch64_insn, 22))
3267 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3268 else
3269 {
3270 record_buf_mem[mem_index++] = esize / 8;
3271 record_buf_mem[mem_index++] = address + addr_offset;
3272 }
3273 addr_offset = addr_offset + (esize / 8);
3274 reg_tt = (reg_tt + 1) % 32;
3275 }
3276 }
3277 }
3278
3279 if (bit (aarch64_insn_r->aarch64_insn, 23))
3280 record_buf[reg_index++] = reg_rn;
3281
3282 aarch64_insn_r->reg_rec_count = reg_index;
3283 aarch64_insn_r->mem_rec_count = mem_index / 2;
3284 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3285 record_buf_mem);
3286 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3287 record_buf);
3288 return AARCH64_RECORD_SUCCESS;
3289}
3290
3291/* Record handler for load and store instructions. */
3292
3293static unsigned int
3294aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3295{
3296 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3297 uint8_t insn_bit23, insn_bit21;
3298 uint8_t opc, size_bits, ld_flag, vector_flag;
3299 uint32_t reg_rn, reg_rt, reg_rt2;
3300 uint64_t datasize, offset;
3301 uint32_t record_buf[8];
3302 uint64_t record_buf_mem[8];
3303 CORE_ADDR address;
3304
3305 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3306 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3307 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3308 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3309 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3310 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3311 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3312 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3313 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3314 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3315 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3316
3317 /* Load/store exclusive. */
3318 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3319 {
3320 if (record_debug)
3321	debug_printf ("Process record: load/store exclusive\n");
3322
3323 if (ld_flag)
3324 {
3325 record_buf[0] = reg_rt;
3326 aarch64_insn_r->reg_rec_count = 1;
3327 if (insn_bit21)
3328 {
3329 record_buf[1] = reg_rt2;
3330 aarch64_insn_r->reg_rec_count = 2;
3331 }
3332 }
3333 else
3334 {
3335 if (insn_bit21)
3336 datasize = (8 << size_bits) * 2;
3337 else
3338 datasize = (8 << size_bits);
3339 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3340 &address);
3341 record_buf_mem[0] = datasize / 8;
3342 record_buf_mem[1] = address;
3343 aarch64_insn_r->mem_rec_count = 1;
3344 if (!insn_bit23)
3345 {
3346 /* Save register rs. */
3347 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3348 aarch64_insn_r->reg_rec_count = 1;
3349 }
3350 }
3351 }
3352 /* Load register (literal) instructions decoding. */
3353 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3354 {
3355 if (record_debug)
3356	debug_printf ("Process record: load register (literal)\n");
3357 if (vector_flag)
3358 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3359 else
3360 record_buf[0] = reg_rt;
3361 aarch64_insn_r->reg_rec_count = 1;
3362 }
3363 /* All types of load/store pair instructions decoding. */
3364 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3365 {
3366 if (record_debug)
3367	debug_printf ("Process record: load/store pair\n");
3368
3369 if (ld_flag)
3370 {
3371 if (vector_flag)
3372 {
3373 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3374 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3375 }
3376 else
3377 {
3378 record_buf[0] = reg_rt;
3379 record_buf[1] = reg_rt2;
3380 }
3381 aarch64_insn_r->reg_rec_count = 2;
3382 }
3383 else
3384 {
3385 uint16_t imm7_off;
3386 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3387 if (!vector_flag)
3388 size_bits = size_bits >> 1;
3389 datasize = 8 << (2 + size_bits);
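	  /* IMM7 is a signed 7-bit field scaled by the access size; bit
	     6 is the sign, so a set sign bit means taking the two's
	     complement here and subtracting the offset below.  */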
3390 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3391 offset = offset << (2 + size_bits);
3392 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3393 &address);
3394 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3395 {
3396 if (imm7_off & 0x40)
3397 address = address - offset;
3398 else
3399 address = address + offset;
3400 }
3401
3402 record_buf_mem[0] = datasize / 8;
3403 record_buf_mem[1] = address;
3404 record_buf_mem[2] = datasize / 8;
3405 record_buf_mem[3] = address + (datasize / 8);
3406 aarch64_insn_r->mem_rec_count = 2;
3407 }
3408 if (bit (aarch64_insn_r->aarch64_insn, 23))
3409 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3410 }
3411 /* Load/store register (unsigned immediate) instructions. */
3412 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3413 {
3414 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3415 if (!(opc >> 1))
3416 if (opc & 0x01)
3417 ld_flag = 0x01;
3418 else
3419 ld_flag = 0x0;
3420 else
3421 if (size_bits != 0x03)
3422 ld_flag = 0x01;
3423 else
3424 return AARCH64_RECORD_UNKNOWN;
3425
3426 if (record_debug)
3427 {
3428	  debug_printf ("Process record: load/store (unsigned immediate):"
3429			" size %x V %d opc %x\n", size_bits, vector_flag,
3430			opc);
3431 }
3432
3433 if (!ld_flag)
3434 {
3435 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3436 datasize = 8 << size_bits;
3437 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3438 &address);
3439 offset = offset << size_bits;
3440 address = address + offset;
3441
3442 record_buf_mem[0] = datasize >> 3;
3443 record_buf_mem[1] = address;
3444 aarch64_insn_r->mem_rec_count = 1;
3445 }
3446 else
3447 {
3448 if (vector_flag)
3449 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3450 else
3451 record_buf[0] = reg_rt;
3452 aarch64_insn_r->reg_rec_count = 1;
3453 }
3454 }
3455 /* Load/store register (register offset) instructions. */
3456  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3457	   && insn_bits10_11 == 0x02 && insn_bit21)
3458    {
3459      if (record_debug)
3460	debug_printf ("Process record: load/store (register offset)\n");
3461 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3462 if (!(opc >> 1))
3463 if (opc & 0x01)
3464 ld_flag = 0x01;
3465 else
3466 ld_flag = 0x0;
3467 else
3468 if (size_bits != 0x03)
3469 ld_flag = 0x01;
3470 else
3471 return AARCH64_RECORD_UNKNOWN;
3472
3473 if (!ld_flag)
3474 {
3475 uint64_t reg_rm_val;
3476 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3477 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
3478 if (bit (aarch64_insn_r->aarch64_insn, 12))
3479 offset = reg_rm_val << size_bits;
3480 else
3481 offset = reg_rm_val;
3482 datasize = 8 << size_bits;
3483 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3484 &address);
3485 address = address + offset;
3486 record_buf_mem[0] = datasize >> 3;
3487 record_buf_mem[1] = address;
3488 aarch64_insn_r->mem_rec_count = 1;
3489 }
3490 else
3491 {
3492 if (vector_flag)
3493 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3494 else
3495 record_buf[0] = reg_rt;
3496 aarch64_insn_r->reg_rec_count = 1;
3497 }
3498 }
3499 /* Load/store register (immediate and unprivileged) instructions. */
3500  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3501	   && !insn_bit21)
3502    {
3503      if (record_debug)
3504	{
3505	  debug_printf ("Process record: load/store "
3506			"(immediate and unprivileged)\n");
3507 }
3508 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3509 if (!(opc >> 1))
3510 if (opc & 0x01)
3511 ld_flag = 0x01;
3512 else
3513 ld_flag = 0x0;
3514 else
3515 if (size_bits != 0x03)
3516 ld_flag = 0x01;
3517 else
3518 return AARCH64_RECORD_UNKNOWN;
3519
3520 if (!ld_flag)
3521 {
3522 uint16_t imm9_off;
3523 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
3524 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
3525 datasize = 8 << size_bits;
3526 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3527 &address);
3528 if (insn_bits10_11 != 0x01)
3529 {
3530 if (imm9_off & 0x0100)
3531 address = address - offset;
3532 else
3533 address = address + offset;
3534 }
3535 record_buf_mem[0] = datasize >> 3;
3536 record_buf_mem[1] = address;
3537 aarch64_insn_r->mem_rec_count = 1;
3538 }
3539 else
3540 {
3541 if (vector_flag)
3542 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3543 else
3544 record_buf[0] = reg_rt;
3545 aarch64_insn_r->reg_rec_count = 1;
3546 }
3547 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
3548 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3549 }
3550 /* Advanced SIMD load/store instructions. */
3551 else
3552 return aarch64_record_asimd_load_store (aarch64_insn_r);
3553
3554 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3555 record_buf_mem);
3556 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3557 record_buf);
3558 return AARCH64_RECORD_SUCCESS;
3559}
3560
3561/* Record handler for data processing SIMD and floating point instructions. */
3562
3563static unsigned int
3564aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3565{
3566 uint8_t insn_bit21, opcode, rmode, reg_rd;
3567 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3568 uint8_t insn_bits11_14;
3569 uint32_t record_buf[2];
3570
3571 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3572 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3573 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3574 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3575 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3576 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3577 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3578 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3579 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3580
3581 if (record_debug)
3582    debug_printf ("Process record: data processing SIMD/FP: ");
3583
3584 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3585 {
3586 /* Floating point - fixed point conversion instructions. */
3587 if (!insn_bit21)
3588 {
3589 if (record_debug)
3590	    debug_printf ("FP - fixed point conversion");
3591
3592 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3593 record_buf[0] = reg_rd;
3594 else
3595 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3596 }
3597 /* Floating point - conditional compare instructions. */
3598 else if (insn_bits10_11 == 0x01)
3599 {
3600 if (record_debug)
3601	    debug_printf ("FP - conditional compare");
3602
3603 record_buf[0] = AARCH64_CPSR_REGNUM;
3604 }
3605 /* Floating point - data processing (2-source) and
3606 conditional select instructions. */
3607 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3608 {
3609 if (record_debug)
3610	    debug_printf ("FP - DP (2-source)");
3611
3612 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3613 }
3614 else if (insn_bits10_11 == 0x00)
3615 {
3616 /* Floating point - immediate instructions. */
3617 if ((insn_bits12_15 & 0x01) == 0x01
3618 || (insn_bits12_15 & 0x07) == 0x04)
3619 {
3620 if (record_debug)
3621		debug_printf ("FP - immediate");
3622 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3623 }
3624 /* Floating point - compare instructions. */
3625 else if ((insn_bits12_15 & 0x03) == 0x02)
3626 {
3627 if (record_debug)
3628		debug_printf ("FP - compare");
3629 record_buf[0] = AARCH64_CPSR_REGNUM;
3630 }
3631 /* Floating point - integer conversions instructions. */
3632	  else if (insn_bits12_15 == 0x00)
3633 {
3634 /* Convert float to integer instruction. */
3635 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
3636 {
3637 if (record_debug)
3638		    debug_printf ("float to int conversion");
3639
3640 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3641 }
3642 /* Convert integer to float instruction. */
3643 else if ((opcode >> 1) == 0x01 && !rmode)
3644 {
3645 if (record_debug)
b277c936 3646 debug_printf ("int to float conversion");
99afc88b
OJ
3647
3648 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3649 }
3650 /* Move float to integer instruction. */
3651 else if ((opcode >> 1) == 0x03)
3652 {
3653 if (record_debug)
b277c936 3654 debug_printf ("move float to int");
99afc88b
OJ
3655
3656 if (!(opcode & 0x01))
3657 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3658 else
3659 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3660 }
f62fce35
YQ
3661 else
3662 return AARCH64_RECORD_UNKNOWN;
99afc88b 3663 }
f62fce35
YQ
3664 else
3665 return AARCH64_RECORD_UNKNOWN;
99afc88b 3666 }
f62fce35
YQ
3667 else
3668 return AARCH64_RECORD_UNKNOWN;
99afc88b
OJ
3669 }
  else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
    {
      if (record_debug)
        debug_printf ("SIMD copy");

      /* Advanced SIMD copy instructions.  */
      if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
          && !bit (aarch64_insn_r->aarch64_insn, 15)
          && bit (aarch64_insn_r->aarch64_insn, 10))
        {
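          /* imm4 values 0x5 and 0x7 select SMOV/UMOV, the only Advanced
             SIMD copy encodings whose destination is a general-purpose
             register; everything else in this group writes a V register.  */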
          if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
            record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
          else
            record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
        }
      else
        record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
    }
  /* All remaining floating point or advanced SIMD instructions.  */
  else
    {
      if (record_debug)
        debug_printf ("all remaining");

      record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
    }

  if (record_debug)
    debug_printf ("\n");

  aarch64_insn_r->reg_rec_count++;
  gdb_assert (aarch64_insn_r->reg_rec_count == 1);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
             record_buf);
  return AARCH64_RECORD_SUCCESS;
}
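
/* A worked example of the decode above (illustrative only, not part of
   the recorder): FMOV x0, d1 assembles to 0x9e660020.  Bits 28-31 are
   0x9 and bits 24-27 are 0xe, so the floating-point group is entered;
   bit 21 is set and bits 10-11 and 12-15 are both 0x00, selecting the
   integer-conversion subtree.  The opcode field (bits 16-18) is 0x6,
   so opcode >> 1 == 0x03 with bit 0 clear ("move float to int"), and
   the register recorded as modified is X0 (reg_rd + AARCH64_X0_REGNUM).  */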

/* Decode an instruction's type and invoke its record handler.  */

static unsigned int
aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
{
  uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;

  ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
  ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
  ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
  ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);

  /* Data processing - immediate instructions.  */
  if (!ins_bit26 && !ins_bit27 && ins_bit28)
    return aarch64_record_data_proc_imm (aarch64_insn_r);

  /* Branch, exception generation and system instructions.  */
  if (ins_bit26 && !ins_bit27 && ins_bit28)
    return aarch64_record_branch_except_sys (aarch64_insn_r);

  /* Load and store instructions.  */
  if (!ins_bit25 && ins_bit27)
    return aarch64_record_load_store (aarch64_insn_r);

  /* Data processing - register instructions.  */
  if (ins_bit25 && !ins_bit26 && ins_bit27)
    return aarch64_record_data_proc_reg (aarch64_insn_r);

  /* Data processing - SIMD and floating point instructions.  */
  if (ins_bit25 && ins_bit26 && ins_bit27)
    return aarch64_record_data_proc_simd_fp (aarch64_insn_r);

  return AARCH64_RECORD_UNSUPPORTED;
}
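
/* For reference, the dispatch above mirrors the A64 top-level encoding
   groups, keyed on bits 25-28 (a summary of the tests above):

     bit28 bit27 bit26 bit25   group
       1     0     0     x     data processing - immediate
       1     0     1     x     branch, exception and system
       x     1     x     0     loads and stores
       x     1     0     1     data processing - register
       x     1     1     1     data processing - SIMD and FP  */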

/* Cleans up local record registers and memory allocations.  */

static void
deallocate_reg_mem (insn_decode_record *record)
{
  xfree (record->aarch64_regs);
  xfree (record->aarch64_mems);
}

/* Parse the current instruction and record the values of the registers
   and memory that the instruction will change to record_arch_list.
   Return -1 if something goes wrong.  */

int
aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
                        CORE_ADDR insn_addr)
{
3758{
3759 uint32_t rec_no = 0;
3760 uint8_t insn_size = 4;
3761 uint32_t ret = 0;
3762 ULONGEST t_bit = 0, insn_id = 0;
3763 gdb_byte buf[insn_size];
3764 insn_decode_record aarch64_record;
3765
3766 memset (&buf[0], 0, insn_size);
3767 memset (&aarch64_record, 0, sizeof (insn_decode_record));
3768 target_read_memory (insn_addr, &buf[0], insn_size);
3769 aarch64_record.aarch64_insn
3770 = (uint32_t) extract_unsigned_integer (&buf[0],
3771 insn_size,
3772 gdbarch_byte_order (gdbarch));
3773 aarch64_record.regcache = regcache;
3774 aarch64_record.this_addr = insn_addr;
3775 aarch64_record.gdbarch = gdbarch;
3776
3777 ret = aarch64_record_decode_insn_handler (&aarch64_record);
3778 if (ret == AARCH64_RECORD_UNSUPPORTED)
3779 {
3780 printf_unfiltered (_("Process record does not support instruction "
3781 "0x%0x at address %s.\n"),
3782 aarch64_record.aarch64_insn,
3783 paddress (gdbarch, insn_addr));
3784 ret = -1;
3785 }

  if (0 == ret)
    {
      /* Record registers.  */
      record_full_arch_list_add_reg (aarch64_record.regcache,
                                     AARCH64_PC_REGNUM);
      /* Always record register CPSR.  */
      record_full_arch_list_add_reg (aarch64_record.regcache,
                                     AARCH64_CPSR_REGNUM);
      if (aarch64_record.aarch64_regs)
        for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
          if (record_full_arch_list_add_reg (aarch64_record.regcache,
                                             aarch64_record.aarch64_regs[rec_no]))
            ret = -1;

      /* Record memories.  */
      if (aarch64_record.aarch64_mems)
        for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
          if (record_full_arch_list_add_mem
              ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
               aarch64_record.aarch64_mems[rec_no].len))
            ret = -1;

      if (record_full_arch_list_add_end ())
        ret = -1;
    }

  deallocate_reg_mem (&aarch64_record);
  return ret;
}
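
/* A minimal sketch of how this entry point is hooked up; the gdbarch
   initialisation routine lives elsewhere in this file, and the setter
   below is the generic gdbarch hook for process record:

     set_gdbarch_process_record (gdbarch, aarch64_process_record);

   Once registered, GDB's "record" command calls aarch64_process_record
   for each instruction about to execute, and the register and memory
   records collected here are what "reverse-step" and "reverse-continue"
   replay.  */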