]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/aarch64-tdep.c
[AArch64] Teach stub unwinder to terminate gracefully
[thirdparty/binutils-gdb.git] / gdb / aarch64-tdep.c
CommitLineData
07b287a0
MS
1/* Common target dependent code for GDB on AArch64 systems.
2
32d0add0 3 Copyright (C) 2009-2015 Free Software Foundation, Inc.
07b287a0
MS
4 Contributed by ARM Ltd.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21#include "defs.h"
22
23#include "frame.h"
24#include "inferior.h"
25#include "gdbcmd.h"
26#include "gdbcore.h"
07b287a0
MS
27#include "dis-asm.h"
28#include "regcache.h"
29#include "reggroups.h"
30#include "doublest.h"
31#include "value.h"
32#include "arch-utils.h"
33#include "osabi.h"
34#include "frame-unwind.h"
35#include "frame-base.h"
36#include "trad-frame.h"
37#include "objfiles.h"
38#include "dwarf2-frame.h"
39#include "gdbtypes.h"
40#include "prologue-value.h"
41#include "target-descriptions.h"
42#include "user-regs.h"
43#include "language.h"
44#include "infcall.h"
45
46#include "aarch64-tdep.h"
47
48#include "elf-bfd.h"
49#include "elf/aarch64.h"
50
07b287a0
MS
51#include "vec.h"
52
99afc88b
OJ
53#include "record.h"
54#include "record-full.h"
55
07b287a0 56#include "features/aarch64.c"
07b287a0
MS
57
/* Pseudo register base numbers.  Each SIMD view of the V registers
   (Q/D/S/H/B) occupies 32 consecutive pseudo register numbers, laid
   out one bank after another starting at AARCH64_Q0_REGNUM.  */
#define AARCH64_Q0_REGNUM 0
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + 32)
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
64
/* The standard register names, and all the valid aliases for them.
   Each entry maps a user-visible alias to the raw register number it
   refers to.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  Each wN aliases the corresponding xN.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials: the intra-procedure-call scratch registers alias
     x16/x17.  */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};
114
/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};
130
/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};
147
/* AArch64 prologue cache structure.  Filled in by the prologue
   analyzer and consumed by the frame unwinders below.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  Zero until the cache has
     been successfully populated.  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.
     -1 means no frame could be identified.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};
179
/* Toggle this file's internal debugging dump.  */
static int aarch64_debug;

/* Implement the "show" callback for "show debug aarch64"; report the
   current value of the aarch64_debug flag to FILE.  */

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}
189
/* Extract a signed value from a bit field within an instruction
   encoding.

   INSN is the instruction opcode.

   WIDTH specifies the width of the bit field to extract (in bits);
   it must be non-zero and less than 32.

   OFFSET specifies the least significant bit of the field where bits
   are numbered zero counting from least to most significant.  */

static int32_t
extract_signed_bitfield (uint32_t insn, unsigned width, unsigned offset)
{
  /* Mask the field out of the opcode, then sign-extend it with the
     xor/subtract trick.  The previous implementation left-shifted a
     signed int32_t into its sign bit, which is undefined behaviour
     in C; all arithmetic here is done on unsigned values instead.  */
  uint32_t field = (insn >> offset) & ((((uint32_t) 1) << width) - 1);
  uint32_t sign = ((uint32_t) 1) << (width - 1);

  return (int32_t) ((field ^ sign) - sign);
}
208
/* Determine if specified bits within an instruction opcode matches a
   specific pattern.

   INSN is the instruction opcode.

   MASK selects the bits of the opcode that are compared against
   PATTERN; return non-zero on a match.  */

static int
decode_masked_match (uint32_t insn, uint32_t mask, uint32_t pattern)
{
  uint32_t selected = insn & mask;

  return selected == pattern ? 1 : 0;
}
222
223/* Decode an opcode if it represents an immediate ADD or SUB instruction.
224
225 ADDR specifies the address of the opcode.
226 INSN specifies the opcode to test.
227 RD receives the 'rd' field from the decoded instruction.
228 RN receives the 'rn' field from the decoded instruction.
229
230 Return 1 if the opcodes matches and is decoded, otherwise 0. */
231static int
232decode_add_sub_imm (CORE_ADDR addr, uint32_t insn, unsigned *rd, unsigned *rn,
233 int32_t *imm)
234{
235 if ((insn & 0x9f000000) == 0x91000000)
236 {
237 unsigned shift;
238 unsigned op_is_sub;
239
240 *rd = (insn >> 0) & 0x1f;
241 *rn = (insn >> 5) & 0x1f;
242 *imm = (insn >> 10) & 0xfff;
243 shift = (insn >> 22) & 0x3;
244 op_is_sub = (insn >> 30) & 0x1;
245
246 switch (shift)
247 {
248 case 0:
249 break;
250 case 1:
251 *imm <<= 12;
252 break;
253 default:
254 /* UNDEFINED */
255 return 0;
256 }
257
258 if (op_is_sub)
259 *imm = -*imm;
260
261 if (aarch64_debug)
262 fprintf_unfiltered (gdb_stdlog,
263 "decode: 0x%s 0x%x add x%u, x%u, #%d\n",
264 core_addr_to_string_nz (addr), insn, *rd, *rn,
265 *imm);
266 return 1;
267 }
268 return 0;
269}
270
271/* Decode an opcode if it represents an ADRP instruction.
272
273 ADDR specifies the address of the opcode.
274 INSN specifies the opcode to test.
275 RD receives the 'rd' field from the decoded instruction.
276
277 Return 1 if the opcodes matches and is decoded, otherwise 0. */
278
279static int
280decode_adrp (CORE_ADDR addr, uint32_t insn, unsigned *rd)
281{
282 if (decode_masked_match (insn, 0x9f000000, 0x90000000))
283 {
284 *rd = (insn >> 0) & 0x1f;
285
286 if (aarch64_debug)
287 fprintf_unfiltered (gdb_stdlog,
288 "decode: 0x%s 0x%x adrp x%u, #?\n",
289 core_addr_to_string_nz (addr), insn, *rd);
290 return 1;
291 }
292 return 0;
293}
294
295/* Decode an opcode if it represents an branch immediate or branch
296 and link immediate instruction.
297
298 ADDR specifies the address of the opcode.
299 INSN specifies the opcode to test.
300 LINK receives the 'link' bit from the decoded instruction.
301 OFFSET receives the immediate offset from the decoded instruction.
302
303 Return 1 if the opcodes matches and is decoded, otherwise 0. */
304
305static int
306decode_b (CORE_ADDR addr, uint32_t insn, unsigned *link, int32_t *offset)
307{
308 /* b 0001 01ii iiii iiii iiii iiii iiii iiii */
309 /* bl 1001 01ii iiii iiii iiii iiii iiii iiii */
310 if (decode_masked_match (insn, 0x7c000000, 0x14000000))
311 {
312 *link = insn >> 31;
313 *offset = extract_signed_bitfield (insn, 26, 0) << 2;
314
315 if (aarch64_debug)
316 fprintf_unfiltered (gdb_stdlog,
317 "decode: 0x%s 0x%x %s 0x%s\n",
318 core_addr_to_string_nz (addr), insn,
319 *link ? "bl" : "b",
320 core_addr_to_string_nz (addr + *offset));
321
322 return 1;
323 }
324 return 0;
325}
326
327/* Decode an opcode if it represents a conditional branch instruction.
328
329 ADDR specifies the address of the opcode.
330 INSN specifies the opcode to test.
331 COND receives the branch condition field from the decoded
332 instruction.
333 OFFSET receives the immediate offset from the decoded instruction.
334
335 Return 1 if the opcodes matches and is decoded, otherwise 0. */
336
337static int
338decode_bcond (CORE_ADDR addr, uint32_t insn, unsigned *cond, int32_t *offset)
339{
340 if (decode_masked_match (insn, 0xfe000000, 0x54000000))
341 {
342 *cond = (insn >> 0) & 0xf;
343 *offset = extract_signed_bitfield (insn, 19, 5) << 2;
344
345 if (aarch64_debug)
346 fprintf_unfiltered (gdb_stdlog,
347 "decode: 0x%s 0x%x b<%u> 0x%s\n",
348 core_addr_to_string_nz (addr), insn, *cond,
349 core_addr_to_string_nz (addr + *offset));
350 return 1;
351 }
352 return 0;
353}
354
355/* Decode an opcode if it represents a branch via register instruction.
356
357 ADDR specifies the address of the opcode.
358 INSN specifies the opcode to test.
359 LINK receives the 'link' bit from the decoded instruction.
360 RN receives the 'rn' field from the decoded instruction.
361
362 Return 1 if the opcodes matches and is decoded, otherwise 0. */
363
364static int
365decode_br (CORE_ADDR addr, uint32_t insn, unsigned *link, unsigned *rn)
366{
367 /* 8 4 0 6 2 8 4 0 */
368 /* blr 110101100011111100000000000rrrrr */
369 /* br 110101100001111100000000000rrrrr */
370 if (decode_masked_match (insn, 0xffdffc1f, 0xd61f0000))
371 {
372 *link = (insn >> 21) & 1;
373 *rn = (insn >> 5) & 0x1f;
374
375 if (aarch64_debug)
376 fprintf_unfiltered (gdb_stdlog,
377 "decode: 0x%s 0x%x %s 0x%x\n",
378 core_addr_to_string_nz (addr), insn,
379 *link ? "blr" : "br", *rn);
380
381 return 1;
382 }
383 return 0;
384}
385
386/* Decode an opcode if it represents a CBZ or CBNZ instruction.
387
388 ADDR specifies the address of the opcode.
389 INSN specifies the opcode to test.
390 IS64 receives the 'sf' field from the decoded instruction.
391 OP receives the 'op' field from the decoded instruction.
392 RN receives the 'rn' field from the decoded instruction.
393 OFFSET receives the 'imm19' field from the decoded instruction.
394
395 Return 1 if the opcodes matches and is decoded, otherwise 0. */
396
397static int
398decode_cb (CORE_ADDR addr,
399 uint32_t insn, int *is64, unsigned *op, unsigned *rn,
400 int32_t *offset)
401{
402 if (decode_masked_match (insn, 0x7e000000, 0x34000000))
403 {
404 /* cbz T011 010o iiii iiii iiii iiii iiir rrrr */
405 /* cbnz T011 010o iiii iiii iiii iiii iiir rrrr */
406
407 *rn = (insn >> 0) & 0x1f;
408 *is64 = (insn >> 31) & 0x1;
409 *op = (insn >> 24) & 0x1;
410 *offset = extract_signed_bitfield (insn, 19, 5) << 2;
411
412 if (aarch64_debug)
413 fprintf_unfiltered (gdb_stdlog,
414 "decode: 0x%s 0x%x %s 0x%s\n",
415 core_addr_to_string_nz (addr), insn,
416 *op ? "cbnz" : "cbz",
417 core_addr_to_string_nz (addr + *offset));
418 return 1;
419 }
420 return 0;
421}
422
423/* Decode an opcode if it represents a ERET instruction.
424
425 ADDR specifies the address of the opcode.
426 INSN specifies the opcode to test.
427
428 Return 1 if the opcodes matches and is decoded, otherwise 0. */
429
430static int
431decode_eret (CORE_ADDR addr, uint32_t insn)
432{
433 /* eret 1101 0110 1001 1111 0000 0011 1110 0000 */
434 if (insn == 0xd69f03e0)
435 {
436 if (aarch64_debug)
437 fprintf_unfiltered (gdb_stdlog, "decode: 0x%s 0x%x eret\n",
438 core_addr_to_string_nz (addr), insn);
439 return 1;
440 }
441 return 0;
442}
443
444/* Decode an opcode if it represents a MOVZ instruction.
445
446 ADDR specifies the address of the opcode.
447 INSN specifies the opcode to test.
448 RD receives the 'rd' field from the decoded instruction.
449
450 Return 1 if the opcodes matches and is decoded, otherwise 0. */
451
452static int
453decode_movz (CORE_ADDR addr, uint32_t insn, unsigned *rd)
454{
455 if (decode_masked_match (insn, 0xff800000, 0x52800000))
456 {
457 *rd = (insn >> 0) & 0x1f;
458
459 if (aarch64_debug)
460 fprintf_unfiltered (gdb_stdlog,
461 "decode: 0x%s 0x%x movz x%u, #?\n",
462 core_addr_to_string_nz (addr), insn, *rd);
463 return 1;
464 }
465 return 0;
466}
467
468/* Decode an opcode if it represents a ORR (shifted register)
469 instruction.
470
471 ADDR specifies the address of the opcode.
472 INSN specifies the opcode to test.
473 RD receives the 'rd' field from the decoded instruction.
474 RN receives the 'rn' field from the decoded instruction.
475 RM receives the 'rm' field from the decoded instruction.
476 IMM receives the 'imm6' field from the decoded instruction.
477
478 Return 1 if the opcodes matches and is decoded, otherwise 0. */
479
480static int
481decode_orr_shifted_register_x (CORE_ADDR addr,
482 uint32_t insn, unsigned *rd, unsigned *rn,
483 unsigned *rm, int32_t *imm)
484{
485 if (decode_masked_match (insn, 0xff200000, 0xaa000000))
486 {
487 *rd = (insn >> 0) & 0x1f;
488 *rn = (insn >> 5) & 0x1f;
489 *rm = (insn >> 16) & 0x1f;
490 *imm = (insn >> 10) & 0x3f;
491
492 if (aarch64_debug)
493 fprintf_unfiltered (gdb_stdlog,
494 "decode: 0x%s 0x%x orr x%u, x%u, x%u, #%u\n",
495 core_addr_to_string_nz (addr), insn, *rd,
496 *rn, *rm, *imm);
497 return 1;
498 }
499 return 0;
500}
501
502/* Decode an opcode if it represents a RET instruction.
503
504 ADDR specifies the address of the opcode.
505 INSN specifies the opcode to test.
506 RN receives the 'rn' field from the decoded instruction.
507
508 Return 1 if the opcodes matches and is decoded, otherwise 0. */
509
510static int
511decode_ret (CORE_ADDR addr, uint32_t insn, unsigned *rn)
512{
513 if (decode_masked_match (insn, 0xfffffc1f, 0xd65f0000))
514 {
515 *rn = (insn >> 5) & 0x1f;
516 if (aarch64_debug)
517 fprintf_unfiltered (gdb_stdlog,
518 "decode: 0x%s 0x%x ret x%u\n",
519 core_addr_to_string_nz (addr), insn, *rn);
520 return 1;
521 }
522 return 0;
523}
524
525/* Decode an opcode if it represents the following instruction:
526 STP rt, rt2, [rn, #imm]
527
528 ADDR specifies the address of the opcode.
529 INSN specifies the opcode to test.
530 RT1 receives the 'rt' field from the decoded instruction.
531 RT2 receives the 'rt2' field from the decoded instruction.
532 RN receives the 'rn' field from the decoded instruction.
533 IMM receives the 'imm' field from the decoded instruction.
534
535 Return 1 if the opcodes matches and is decoded, otherwise 0. */
536
537static int
538decode_stp_offset (CORE_ADDR addr,
539 uint32_t insn,
540 unsigned *rt1, unsigned *rt2, unsigned *rn, int32_t *imm)
541{
542 if (decode_masked_match (insn, 0xffc00000, 0xa9000000))
543 {
544 *rt1 = (insn >> 0) & 0x1f;
545 *rn = (insn >> 5) & 0x1f;
546 *rt2 = (insn >> 10) & 0x1f;
547 *imm = extract_signed_bitfield (insn, 7, 15);
548 *imm <<= 3;
549
550 if (aarch64_debug)
551 fprintf_unfiltered (gdb_stdlog,
552 "decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]\n",
553 core_addr_to_string_nz (addr), insn,
554 *rt1, *rt2, *rn, *imm);
555 return 1;
556 }
557 return 0;
558}
559
560/* Decode an opcode if it represents the following instruction:
561 STP rt, rt2, [rn, #imm]!
562
563 ADDR specifies the address of the opcode.
564 INSN specifies the opcode to test.
565 RT1 receives the 'rt' field from the decoded instruction.
566 RT2 receives the 'rt2' field from the decoded instruction.
567 RN receives the 'rn' field from the decoded instruction.
568 IMM receives the 'imm' field from the decoded instruction.
569
570 Return 1 if the opcodes matches and is decoded, otherwise 0. */
571
572static int
573decode_stp_offset_wb (CORE_ADDR addr,
574 uint32_t insn,
575 unsigned *rt1, unsigned *rt2, unsigned *rn,
576 int32_t *imm)
577{
578 if (decode_masked_match (insn, 0xffc00000, 0xa9800000))
579 {
580 *rt1 = (insn >> 0) & 0x1f;
581 *rn = (insn >> 5) & 0x1f;
582 *rt2 = (insn >> 10) & 0x1f;
583 *imm = extract_signed_bitfield (insn, 7, 15);
584 *imm <<= 3;
585
586 if (aarch64_debug)
587 fprintf_unfiltered (gdb_stdlog,
588 "decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]!\n",
589 core_addr_to_string_nz (addr), insn,
590 *rt1, *rt2, *rn, *imm);
591 return 1;
592 }
593 return 0;
594}
595
596/* Decode an opcode if it represents the following instruction:
597 STUR rt, [rn, #imm]
598
599 ADDR specifies the address of the opcode.
600 INSN specifies the opcode to test.
601 IS64 receives size field from the decoded instruction.
602 RT receives the 'rt' field from the decoded instruction.
603 RN receives the 'rn' field from the decoded instruction.
604 IMM receives the 'imm' field from the decoded instruction.
605
606 Return 1 if the opcodes matches and is decoded, otherwise 0. */
607
608static int
609decode_stur (CORE_ADDR addr, uint32_t insn, int *is64, unsigned *rt,
610 unsigned *rn, int32_t *imm)
611{
612 if (decode_masked_match (insn, 0xbfe00c00, 0xb8000000))
613 {
614 *is64 = (insn >> 30) & 1;
615 *rt = (insn >> 0) & 0x1f;
616 *rn = (insn >> 5) & 0x1f;
617 *imm = extract_signed_bitfield (insn, 9, 12);
618
619 if (aarch64_debug)
620 fprintf_unfiltered (gdb_stdlog,
621 "decode: 0x%s 0x%x stur %c%u, [x%u + #%d]\n",
622 core_addr_to_string_nz (addr), insn,
623 *is64 ? 'x' : 'w', *rt, *rn, *imm);
624 return 1;
625 }
626 return 0;
627}
628
629/* Decode an opcode if it represents a TB or TBNZ instruction.
630
631 ADDR specifies the address of the opcode.
632 INSN specifies the opcode to test.
633 OP receives the 'op' field from the decoded instruction.
634 BIT receives the bit position field from the decoded instruction.
635 RT receives 'rt' field from the decoded instruction.
636 IMM receives 'imm' field from the decoded instruction.
637
638 Return 1 if the opcodes matches and is decoded, otherwise 0. */
639
640static int
641decode_tb (CORE_ADDR addr,
642 uint32_t insn, unsigned *op, unsigned *bit, unsigned *rt,
643 int32_t *imm)
644{
645 if (decode_masked_match (insn, 0x7e000000, 0x36000000))
646 {
647 /* tbz b011 0110 bbbb biii iiii iiii iiir rrrr */
648 /* tbnz B011 0111 bbbb biii iiii iiii iiir rrrr */
649
650 *rt = (insn >> 0) & 0x1f;
651 *op = insn & (1 << 24);
652 *bit = ((insn >> (31 - 4)) & 0x20) | ((insn >> 19) & 0x1f);
653 *imm = extract_signed_bitfield (insn, 14, 5) << 2;
654
655 if (aarch64_debug)
656 fprintf_unfiltered (gdb_stdlog,
657 "decode: 0x%s 0x%x %s x%u, #%u, 0x%s\n",
658 core_addr_to_string_nz (addr), insn,
659 *op ? "tbnz" : "tbz", *rt, *bit,
660 core_addr_to_string_nz (addr + *imm));
661 return 1;
662 }
663 return 0;
664}
665
/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.

   GDBARCH is the current architecture.  START and LIMIT bound the
   instruction range scanned.  CACHE, if non-NULL, receives the frame
   register, frame size and saved-register offsets discovered.

   Return the address at which the scan stopped.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  pv_t regs[AARCH64_X_REGISTER_COUNT];
  struct pv_area *stack;
  struct cleanup *back_to;

  /* Start with every X register holding its abstract entry value,
     then symbolically execute the prologue instruction by
     instruction.  */
  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  stack = make_pv_area (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
  back_to = make_cleanup_free_pv_area (stack);

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      unsigned rd;
      unsigned rn;
      unsigned rm;
      unsigned rt;
      unsigned rt1;
      unsigned rt2;
      int op_is_sub;
      int32_t imm;
      unsigned cond;
      int is64;
      unsigned is_link;
      unsigned op;
      unsigned bit;
      int32_t offset;

      insn = read_memory_unsigned_integer (start, 4, byte_order_for_code);

      if (decode_add_sub_imm (start, insn, &rd, &rn, &imm))
	regs[rd] = pv_add_constant (regs[rn], imm);
      else if (decode_adrp (start, insn, &rd))
	regs[rd] = pv_unknown ();
      else if (decode_b (start, insn, &is_link, &offset))
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (decode_bcond (start, insn, &cond, &offset))
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (decode_br (start, insn, &is_link, &rn))
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (decode_cb (start, insn, &is64, &op, &rn, &offset))
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (decode_eret (start, insn))
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (decode_movz (start, insn, &rd))
	regs[rd] = pv_unknown ();
      else
	if (decode_orr_shifted_register_x (start, insn, &rd, &rn, &rm, &imm))
	{
	  /* ORR xd, xzr, xm with no shift is a plain register move;
	     any other form defeats the analysis.  */
	  if (imm == 0 && rn == 31)
	    regs[rd] = regs[rm];
	  else
	    {
	      if (aarch64_debug)
		fprintf_unfiltered
		  (gdb_stdlog,
		   "aarch64: prologue analysis gave up addr=0x%s "
		   "opcode=0x%x (orr x register)\n",
		   core_addr_to_string_nz (start),
		   insn);
	      break;
	    }
	}
      else if (decode_ret (start, insn, &rn))
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (decode_stur (start, insn, &is64, &rt, &rn, &offset))
	{
	  pv_area_store (stack, pv_add_constant (regs[rn], offset),
			 is64 ? 8 : 4, regs[rt]);
	}
      else if (decode_stp_offset (start, insn, &rt1, &rt2, &rn, &imm))
	{
	  /* If recording this store would invalidate the store area
	     (perhaps because rn is not known) then we should abandon
	     further prologue analysis.  */
	  if (pv_area_store_would_trash (stack,
					 pv_add_constant (regs[rn], imm)))
	    break;

	  if (pv_area_store_would_trash (stack,
					 pv_add_constant (regs[rn], imm + 8)))
	    break;

	  pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
			 regs[rt1]);
	  pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
			 regs[rt2]);
	}
      else if (decode_stp_offset_wb (start, insn, &rt1, &rt2, &rn, &imm))
	{
	  /* If recording this store would invalidate the store area
	     (perhaps because rn is not known) then we should abandon
	     further prologue analysis.  */
	  if (pv_area_store_would_trash (stack,
					 pv_add_constant (regs[rn], imm)))
	    break;

	  if (pv_area_store_would_trash (stack,
					 pv_add_constant (regs[rn], imm + 8)))
	    break;

	  pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
			 regs[rt1]);
	  pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
			 regs[rt2]);
	  /* The write-back form also updates the base register.  */
	  regs[rn] = pv_add_constant (regs[rn], imm);
	}
      else if (decode_tb (start, insn, &op, &bit, &rn, &offset))
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else
	{
	  if (aarch64_debug)
	    fprintf_unfiltered (gdb_stdlog,
				"aarch64: prologue analysis gave up addr=0x%s"
				" opcode=0x%x\n",
				core_addr_to_string_nz (start), insn);
	  break;
	}
    }

  /* With no cache to fill, the caller only wants the stop address.  */
  if (cache == NULL)
    {
      do_cleanups (back_to);
      return start;
    }

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  /* Record where each register was saved, as an offset that the
     caller later rebases against the reconstructed previous SP.  */
  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (pv_area_find_reg (stack, gdbarch, i, &offset))
	cache->saved_regs[i].addr = offset;
    }

  do_cleanups (back_to);
  return start;
}
853
854/* Implement the "skip_prologue" gdbarch method. */
855
856static CORE_ADDR
857aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
858{
859 unsigned long inst;
860 CORE_ADDR skip_pc;
861 CORE_ADDR func_addr, limit_pc;
862 struct symtab_and_line sal;
863
864 /* See if we can determine the end of the prologue via the symbol
865 table. If so, then return either PC, or the PC after the
866 prologue, whichever is greater. */
867 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
868 {
869 CORE_ADDR post_prologue_pc
870 = skip_prologue_using_sal (gdbarch, func_addr);
871
872 if (post_prologue_pc != 0)
873 return max (pc, post_prologue_pc);
874 }
875
876 /* Can't determine prologue from the symbol table, need to examine
877 instructions. */
878
879 /* Find an upper limit on the function prologue using the debug
880 information. If the debug information could not be used to
881 provide that bound, then use an arbitrary large number as the
882 upper bound. */
883 limit_pc = skip_prologue_using_sal (gdbarch, pc);
884 if (limit_pc == 0)
885 limit_pc = pc + 128; /* Magic. */
886
887 /* Try disassembling prologue. */
888 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
889}
890
891/* Scan the function prologue for THIS_FRAME and populate the prologue
892 cache CACHE. */
893
894static void
895aarch64_scan_prologue (struct frame_info *this_frame,
896 struct aarch64_prologue_cache *cache)
897{
898 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
899 CORE_ADDR prologue_start;
900 CORE_ADDR prologue_end;
901 CORE_ADDR prev_pc = get_frame_pc (this_frame);
902 struct gdbarch *gdbarch = get_frame_arch (this_frame);
903
db634143
PL
904 cache->prev_pc = prev_pc;
905
07b287a0
MS
906 /* Assume we do not find a frame. */
907 cache->framereg = -1;
908 cache->framesize = 0;
909
910 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
911 &prologue_end))
912 {
913 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
914
915 if (sal.line == 0)
916 {
917 /* No line info so use the current PC. */
918 prologue_end = prev_pc;
919 }
920 else if (sal.end < prologue_end)
921 {
922 /* The next line begins after the function end. */
923 prologue_end = sal.end;
924 }
925
926 prologue_end = min (prologue_end, prev_pc);
927 aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
928 }
929 else
930 {
931 CORE_ADDR frame_loc;
932 LONGEST saved_fp;
933 LONGEST saved_lr;
934 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
935
936 frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
937 if (frame_loc == 0)
938 return;
939
940 cache->framereg = AARCH64_FP_REGNUM;
941 cache->framesize = 16;
942 cache->saved_regs[29].addr = 0;
943 cache->saved_regs[30].addr = 8;
944 }
945}
946
7dfa3edc
PL
/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
			       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  /* framereg == -1 means the scan found no frame; leave the cache
     marked unavailable.  */
  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  /* Only mark the cache usable once everything above succeeded
     without throwing.  */
  cache->available_p = 1;
}
979
/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this is if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      /* Swallow NOT_AVAILABLE_ERROR: the cache simply stays marked
	 unavailable (available_p == 0).  Re-throw anything else.  */
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw_exception (ex);
    }
  END_CATCH

  return cache;
}
1010
7dfa3edc
PL
1011/* Implement the "stop_reason" frame_unwind method. */
1012
1013static enum unwind_stop_reason
1014aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
1015 void **this_cache)
1016{
1017 struct aarch64_prologue_cache *cache
1018 = aarch64_make_prologue_cache (this_frame, this_cache);
1019
1020 if (!cache->available_p)
1021 return UNWIND_UNAVAILABLE;
1022
1023 /* Halt the backtrace at "_start". */
1024 if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
1025 return UNWIND_OUTERMOST;
1026
1027 /* We've hit a wall, stop. */
1028 if (cache->prev_sp == 0)
1029 return UNWIND_OUTERMOST;
1030
1031 return UNWIND_NO_REASON;
1032}
1033
07b287a0
MS
1034/* Our frame ID for a normal frame is the current function's starting
1035 PC and the caller's SP when we were called. */
1036
1037static void
1038aarch64_prologue_this_id (struct frame_info *this_frame,
1039 void **this_cache, struct frame_id *this_id)
1040{
7c8edfae
PL
1041 struct aarch64_prologue_cache *cache
1042 = aarch64_make_prologue_cache (this_frame, this_cache);
07b287a0 1043
7dfa3edc
PL
1044 if (!cache->available_p)
1045 *this_id = frame_id_build_unavailable_stack (cache->func);
1046 else
1047 *this_id = frame_id_build (cache->prev_sp, cache->func);
07b287a0
MS
1048}
1049
/* Implement the "prev_register" frame_unwind method.

   Return the value the previous frame held in PREV_REGNUM, using the
   saved-register addresses recorded in the prologue cache.  PC and SP
   get special treatment since neither is recovered from a stack
   slot.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
				void **this_cache, int prev_regnum)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
         +----------+  ^
         | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          |     <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
         |          |
         |          |<- SP
         +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
				      cache->prev_sp);

  /* Everything else comes from the saved-register table filled in by
     the prologue analyzer.  */
  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
				       prev_regnum);
}
1093
/* AArch64 prologue unwinder.  Positional initializer; the field
   layout is defined by struct frame_unwind in GDB's frame-unwind.h.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};
1104
/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  /* Already built on a previous call for this frame.  */
  if (*this_cache != NULL)
    return *this_cache;

  /* Zero-filled, so available_p starts out false.  */
  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  /* Publish the cache before filling it, so a partially-initialized
     cache is returned (with available_p == 0) if the reads below
     throw.  */
  *this_cache = cache;

  TRY
    {
      /* For a stub frame the previous SP/PC are simply the current
	 ones; no prologue analysis is performed.  */
      cache->prev_sp = get_frame_register_unsigned (this_frame,
						    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      /* Swallow only "not available" errors; anything else is a real
	 failure and is re-thrown.  */
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw_exception (ex);
    }
  END_CATCH

  return cache;
}
1138
02a2a705
PL
1139/* Implement the "stop_reason" frame_unwind method. */
1140
1141static enum unwind_stop_reason
1142aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
1143 void **this_cache)
1144{
1145 struct aarch64_prologue_cache *cache
1146 = aarch64_make_stub_cache (this_frame, this_cache);
1147
1148 if (!cache->available_p)
1149 return UNWIND_UNAVAILABLE;
1150
1151 return UNWIND_NO_REASON;
1152}
1153
07b287a0
MS
1154/* Our frame ID for a stub frame is the current SP and LR. */
1155
1156static void
1157aarch64_stub_this_id (struct frame_info *this_frame,
1158 void **this_cache, struct frame_id *this_id)
1159{
8b61f75d
PL
1160 struct aarch64_prologue_cache *cache
1161 = aarch64_make_stub_cache (this_frame, this_cache);
07b287a0 1162
02a2a705
PL
1163 if (cache->available_p)
1164 *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
1165 else
1166 *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
07b287a0
MS
1167}
1168
1169/* Implement the "sniffer" frame_unwind method. */
1170
1171static int
1172aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
1173 struct frame_info *this_frame,
1174 void **this_prologue_cache)
1175{
1176 CORE_ADDR addr_in_block;
1177 gdb_byte dummy[4];
1178
1179 addr_in_block = get_frame_address_in_block (this_frame);
3e5d3a5a 1180 if (in_plt_section (addr_in_block)
07b287a0
MS
1181 /* We also use the stub winder if the target memory is unreadable
1182 to avoid having the prologue unwinder trying to read it. */
1183 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
1184 return 1;
1185
1186 return 0;
1187}
1188
/* AArch64 stub unwinder.  Positional initializer; the field layout is
   defined by struct frame_unwind in GDB's frame-unwind.h.  Register
   recovery is shared with the prologue unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};
1199
1200/* Return the frame base address of *THIS_FRAME. */
1201
1202static CORE_ADDR
1203aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
1204{
7c8edfae
PL
1205 struct aarch64_prologue_cache *cache
1206 = aarch64_make_prologue_cache (this_frame, this_cache);
07b287a0
MS
1207
1208 return cache->prev_sp - cache->framesize;
1209}
1210
/* AArch64 default frame base information.  The same base address is
   used for the frame base, locals, and arguments.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};
1219
1220/* Assuming THIS_FRAME is a dummy, return the frame ID of that
1221 dummy frame. The frame ID's base needs to match the TOS value
1222 saved by save_dummy_frame_tos () and returned from
1223 aarch64_push_dummy_call, and the PC needs to match the dummy
1224 frame's breakpoint. */
1225
1226static struct frame_id
1227aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
1228{
1229 return frame_id_build (get_frame_register_unsigned (this_frame,
1230 AARCH64_SP_REGNUM),
1231 get_frame_pc (this_frame));
1232}
1233
1234/* Implement the "unwind_pc" gdbarch method. */
1235
1236static CORE_ADDR
1237aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
1238{
1239 CORE_ADDR pc
1240 = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);
1241
1242 return pc;
1243}
1244
1245/* Implement the "unwind_sp" gdbarch method. */
1246
1247static CORE_ADDR
1248aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
1249{
1250 return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
1251}
1252
1253/* Return the value of the REGNUM register in the previous frame of
1254 *THIS_FRAME. */
1255
1256static struct value *
1257aarch64_dwarf2_prev_register (struct frame_info *this_frame,
1258 void **this_cache, int regnum)
1259{
1260 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1261 CORE_ADDR lr;
1262
1263 switch (regnum)
1264 {
1265 case AARCH64_PC_REGNUM:
1266 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1267 return frame_unwind_got_constant (this_frame, regnum, lr);
1268
1269 default:
1270 internal_error (__FILE__, __LINE__,
1271 _("Unexpected register %d"), regnum);
1272 }
1273}
1274
1275/* Implement the "init_reg" dwarf2_frame_ops method. */
1276
1277static void
1278aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
1279 struct dwarf2_frame_state_reg *reg,
1280 struct frame_info *this_frame)
1281{
1282 switch (regnum)
1283 {
1284 case AARCH64_PC_REGNUM:
1285 reg->how = DWARF2_FRAME_REG_FN;
1286 reg->loc.fn = aarch64_dwarf2_prev_register;
1287 break;
1288 case AARCH64_SP_REGNUM:
1289 reg->how = DWARF2_FRAME_REG_CFA;
1290 break;
1291 }
1292}
1293
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  Points into a buffer owned by the
     caller; the bytes are only copied to target memory when the item
     is flushed in aarch64_push_dummy_call.  */
  const void *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);
1307
1308/* Return the alignment (in bytes) of the given type. */
1309
1310static int
1311aarch64_type_align (struct type *t)
1312{
1313 int n;
1314 int align;
1315 int falign;
1316
1317 t = check_typedef (t);
1318 switch (TYPE_CODE (t))
1319 {
1320 default:
1321 /* Should never happen. */
1322 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
1323 return 4;
1324
1325 case TYPE_CODE_PTR:
1326 case TYPE_CODE_ENUM:
1327 case TYPE_CODE_INT:
1328 case TYPE_CODE_FLT:
1329 case TYPE_CODE_SET:
1330 case TYPE_CODE_RANGE:
1331 case TYPE_CODE_BITSTRING:
1332 case TYPE_CODE_REF:
1333 case TYPE_CODE_CHAR:
1334 case TYPE_CODE_BOOL:
1335 return TYPE_LENGTH (t);
1336
1337 case TYPE_CODE_ARRAY:
1338 case TYPE_CODE_COMPLEX:
1339 return aarch64_type_align (TYPE_TARGET_TYPE (t));
1340
1341 case TYPE_CODE_STRUCT:
1342 case TYPE_CODE_UNION:
1343 align = 1;
1344 for (n = 0; n < TYPE_NFIELDS (t); n++)
1345 {
1346 falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
1347 if (falign > align)
1348 align = falign;
1349 }
1350 return align;
1351 }
1352}
1353
1354/* Return 1 if *TY is a homogeneous floating-point aggregate as
1355 defined in the AAPCS64 ABI document; otherwise return 0. */
1356
1357static int
1358is_hfa (struct type *ty)
1359{
1360 switch (TYPE_CODE (ty))
1361 {
1362 case TYPE_CODE_ARRAY:
1363 {
1364 struct type *target_ty = TYPE_TARGET_TYPE (ty);
1365 if (TYPE_CODE (target_ty) == TYPE_CODE_FLT && TYPE_LENGTH (ty) <= 4)
1366 return 1;
1367 break;
1368 }
1369
1370 case TYPE_CODE_UNION:
1371 case TYPE_CODE_STRUCT:
1372 {
1373 if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
1374 {
1375 struct type *member0_type;
1376
1377 member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
1378 if (TYPE_CODE (member0_type) == TYPE_CODE_FLT)
1379 {
1380 int i;
1381
1382 for (i = 0; i < TYPE_NFIELDS (ty); i++)
1383 {
1384 struct type *member1_type;
1385
1386 member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
1387 if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
1388 || (TYPE_LENGTH (member0_type)
1389 != TYPE_LENGTH (member1_type)))
1390 return 0;
1391 }
1392 return 1;
1393 }
1394 }
1395 return 0;
1396 }
1397
1398 default:
1399 break;
1400 }
1401
1402 return 0;
1403}
1404
/* AArch64 function call information structure.  Tracks the running
   state of argument marshalling for a dummy call, following the
   terminology of the AArch64 Procedure Call Standard.  */
struct aarch64_call_info
{
  /* The current argument number (used only for debug output).  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa;

  /* Stack item vector.  Items are flushed to target memory, last
     first, when the call is finally pushed.  */
  VEC(stack_item_t) *si;
};
1426
1427/* Pass a value in a sequence of consecutive X registers. The caller
1428 is responsbile for ensuring sufficient registers are available. */
1429
1430static void
1431pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
1432 struct aarch64_call_info *info, struct type *type,
1433 const bfd_byte *buf)
1434{
1435 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1436 int len = TYPE_LENGTH (type);
1437 enum type_code typecode = TYPE_CODE (type);
1438 int regnum = AARCH64_X0_REGNUM + info->ngrn;
1439
1440 info->argnum++;
1441
1442 while (len > 0)
1443 {
1444 int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
1445 CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
1446 byte_order);
1447
1448
1449 /* Adjust sub-word struct/union args when big-endian. */
1450 if (byte_order == BFD_ENDIAN_BIG
1451 && partial_len < X_REGISTER_SIZE
1452 && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
1453 regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);
1454
1455 if (aarch64_debug)
1456 fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
1457 info->argnum,
1458 gdbarch_register_name (gdbarch, regnum),
1459 phex (regval, X_REGISTER_SIZE));
1460 regcache_cooked_write_unsigned (regcache, regnum, regval);
1461 len -= partial_len;
1462 buf += partial_len;
1463 regnum++;
1464 }
1465}
1466
1467/* Attempt to marshall a value in a V register. Return 1 if
1468 successful, or 0 if insufficient registers are available. This
1469 function, unlike the equivalent pass_in_x() function does not
1470 handle arguments spread across multiple registers. */
1471
1472static int
1473pass_in_v (struct gdbarch *gdbarch,
1474 struct regcache *regcache,
1475 struct aarch64_call_info *info,
1476 const bfd_byte *buf)
1477{
1478 if (info->nsrn < 8)
1479 {
1480 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1481 int regnum = AARCH64_V0_REGNUM + info->nsrn;
1482
1483 info->argnum++;
1484 info->nsrn++;
1485
1486 regcache_cooked_write (regcache, regnum, buf);
1487 if (aarch64_debug)
1488 fprintf_unfiltered (gdb_stdlog, "arg %d in %s\n",
1489 info->argnum,
1490 gdbarch_register_name (gdbarch, regnum));
1491 return 1;
1492 }
1493 info->nsrn = 8;
1494 return 0;
1495}
1496
/* Marshall an argument onto the stack.  The bytes are not written to
   target memory here; they are queued on INFO->si and flushed by
   aarch64_push_dummy_call.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
	       const bfd_byte *buf)
{
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = aarch64_type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     Natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    fprintf_unfiltered (gdb_stdlog, "arg %d len=%d @ sp + %d\n",
			info->argnum, len, info->nsaa);

  item.len = len;
  item.data = buf;
  VEC_safe_push (stack_item_t, info->si, &item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  NOTE(review): the padding item
	 reuses BUF as its data source, so the pad bytes written to the
	 stack are copies of the argument's bytes -- presumably never
	 read by the callee; confirm.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = buf;

      VEC_safe_push (stack_item_t, info->si, &item);
      info->nsaa += pad;
    }
}
1540
1541/* Marshall an argument into a sequence of one or more consecutive X
1542 registers or, if insufficient X registers are available then onto
1543 the stack. */
1544
1545static void
1546pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1547 struct aarch64_call_info *info, struct type *type,
1548 const bfd_byte *buf)
1549{
1550 int len = TYPE_LENGTH (type);
1551 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1552
1553 /* PCS C.13 - Pass in registers if we have enough spare */
1554 if (info->ngrn + nregs <= 8)
1555 {
1556 pass_in_x (gdbarch, regcache, info, type, buf);
1557 info->ngrn += nregs;
1558 }
1559 else
1560 {
1561 info->ngrn = 8;
1562 pass_on_stack (info, type, buf);
1563 }
1564}
1565
1566/* Pass a value in a V register, or on the stack if insufficient are
1567 available. */
1568
1569static void
1570pass_in_v_or_stack (struct gdbarch *gdbarch,
1571 struct regcache *regcache,
1572 struct aarch64_call_info *info,
1573 struct type *type,
1574 const bfd_byte *buf)
1575{
1576 if (!pass_in_v (gdbarch, regcache, info, buf))
1577 pass_on_stack (info, type, buf);
1578}
1579
1580/* Implement the "push_dummy_call" gdbarch method. */
1581
1582static CORE_ADDR
1583aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1584 struct regcache *regcache, CORE_ADDR bp_addr,
1585 int nargs,
1586 struct value **args, CORE_ADDR sp, int struct_return,
1587 CORE_ADDR struct_addr)
1588{
1589 int nstack = 0;
1590 int argnum;
1591 int x_argreg;
1592 int v_argreg;
1593 struct aarch64_call_info info;
1594 struct type *func_type;
1595 struct type *return_type;
1596 int lang_struct_return;
1597
1598 memset (&info, 0, sizeof (info));
1599
1600 /* We need to know what the type of the called function is in order
1601 to determine the number of named/anonymous arguments for the
1602 actual argument placement, and the return type in order to handle
1603 return value correctly.
1604
1605 The generic code above us views the decision of return in memory
1606 or return in registers as a two stage processes. The language
1607 handler is consulted first and may decide to return in memory (eg
1608 class with copy constructor returned by value), this will cause
1609 the generic code to allocate space AND insert an initial leading
1610 argument.
1611
1612 If the language code does not decide to pass in memory then the
1613 target code is consulted.
1614
1615 If the language code decides to pass in memory we want to move
1616 the pointer inserted as the initial argument from the argument
1617 list and into X8, the conventional AArch64 struct return pointer
1618 register.
1619
1620 This is slightly awkward, ideally the flag "lang_struct_return"
1621 would be passed to the targets implementation of push_dummy_call.
1622 Rather that change the target interface we call the language code
1623 directly ourselves. */
1624
1625 func_type = check_typedef (value_type (function));
1626
1627 /* Dereference function pointer types. */
1628 if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
1629 func_type = TYPE_TARGET_TYPE (func_type);
1630
1631 gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
1632 || TYPE_CODE (func_type) == TYPE_CODE_METHOD);
1633
1634 /* If language_pass_by_reference () returned true we will have been
1635 given an additional initial argument, a hidden pointer to the
1636 return slot in memory. */
1637 return_type = TYPE_TARGET_TYPE (func_type);
1638 lang_struct_return = language_pass_by_reference (return_type);
1639
1640 /* Set the return address. For the AArch64, the return breakpoint
1641 is always at BP_ADDR. */
1642 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1643
1644 /* If we were given an initial argument for the return slot because
1645 lang_struct_return was true, lose it. */
1646 if (lang_struct_return)
1647 {
1648 args++;
1649 nargs--;
1650 }
1651
1652 /* The struct_return pointer occupies X8. */
1653 if (struct_return || lang_struct_return)
1654 {
1655 if (aarch64_debug)
1656 fprintf_unfiltered (gdb_stdlog, "struct return in %s = 0x%s\n",
1657 gdbarch_register_name
1658 (gdbarch,
1659 AARCH64_STRUCT_RETURN_REGNUM),
1660 paddress (gdbarch, struct_addr));
1661 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1662 struct_addr);
1663 }
1664
1665 for (argnum = 0; argnum < nargs; argnum++)
1666 {
1667 struct value *arg = args[argnum];
1668 struct type *arg_type;
1669 int len;
1670
1671 arg_type = check_typedef (value_type (arg));
1672 len = TYPE_LENGTH (arg_type);
1673
1674 switch (TYPE_CODE (arg_type))
1675 {
1676 case TYPE_CODE_INT:
1677 case TYPE_CODE_BOOL:
1678 case TYPE_CODE_CHAR:
1679 case TYPE_CODE_RANGE:
1680 case TYPE_CODE_ENUM:
1681 if (len < 4)
1682 {
1683 /* Promote to 32 bit integer. */
1684 if (TYPE_UNSIGNED (arg_type))
1685 arg_type = builtin_type (gdbarch)->builtin_uint32;
1686 else
1687 arg_type = builtin_type (gdbarch)->builtin_int32;
1688 arg = value_cast (arg_type, arg);
1689 }
1690 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1691 value_contents (arg));
1692 break;
1693
1694 case TYPE_CODE_COMPLEX:
1695 if (info.nsrn <= 6)
1696 {
1697 const bfd_byte *buf = value_contents (arg);
1698 struct type *target_type =
1699 check_typedef (TYPE_TARGET_TYPE (arg_type));
1700
1701 pass_in_v (gdbarch, regcache, &info, buf);
1702 pass_in_v (gdbarch, regcache, &info,
1703 buf + TYPE_LENGTH (target_type));
1704 }
1705 else
1706 {
1707 info.nsrn = 8;
1708 pass_on_stack (&info, arg_type, value_contents (arg));
1709 }
1710 break;
1711 case TYPE_CODE_FLT:
1712 pass_in_v_or_stack (gdbarch, regcache, &info, arg_type,
1713 value_contents (arg));
1714 break;
1715
1716 case TYPE_CODE_STRUCT:
1717 case TYPE_CODE_ARRAY:
1718 case TYPE_CODE_UNION:
1719 if (is_hfa (arg_type))
1720 {
1721 int elements = TYPE_NFIELDS (arg_type);
1722
1723 /* Homogeneous Aggregates */
1724 if (info.nsrn + elements < 8)
1725 {
1726 int i;
1727
1728 for (i = 0; i < elements; i++)
1729 {
1730 /* We know that we have sufficient registers
1731 available therefore this will never fallback
1732 to the stack. */
1733 struct value *field =
1734 value_primitive_field (arg, 0, i, arg_type);
1735 struct type *field_type =
1736 check_typedef (value_type (field));
1737
1738 pass_in_v_or_stack (gdbarch, regcache, &info, field_type,
1739 value_contents_writeable (field));
1740 }
1741 }
1742 else
1743 {
1744 info.nsrn = 8;
1745 pass_on_stack (&info, arg_type, value_contents (arg));
1746 }
1747 }
1748 else if (len > 16)
1749 {
1750 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1751 invisible reference. */
1752
1753 /* Allocate aligned storage. */
1754 sp = align_down (sp - len, 16);
1755
1756 /* Write the real data into the stack. */
1757 write_memory (sp, value_contents (arg), len);
1758
1759 /* Construct the indirection. */
1760 arg_type = lookup_pointer_type (arg_type);
1761 arg = value_from_pointer (arg_type, sp);
1762 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1763 value_contents (arg));
1764 }
1765 else
1766 /* PCS C.15 / C.18 multiple values pass. */
1767 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1768 value_contents (arg));
1769 break;
1770
1771 default:
1772 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1773 value_contents (arg));
1774 break;
1775 }
1776 }
1777
1778 /* Make sure stack retains 16 byte alignment. */
1779 if (info.nsaa & 15)
1780 sp -= 16 - (info.nsaa & 15);
1781
1782 while (!VEC_empty (stack_item_t, info.si))
1783 {
1784 stack_item_t *si = VEC_last (stack_item_t, info.si);
1785
1786 sp -= si->len;
1787 write_memory (sp, si->data, si->len);
1788 VEC_pop (stack_item_t, info.si);
1789 }
1790
1791 VEC_free (stack_item_t, info.si);
1792
1793 /* Finally, update the SP register. */
1794 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1795
1796 return sp;
1797}
1798
1799/* Implement the "frame_align" gdbarch method. */
1800
1801static CORE_ADDR
1802aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1803{
1804 /* Align the stack to sixteen bytes. */
1805 return sp & ~(CORE_ADDR) 15;
1806}
1807
1808/* Return the type for an AdvSISD Q register. */
1809
1810static struct type *
1811aarch64_vnq_type (struct gdbarch *gdbarch)
1812{
1813 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1814
1815 if (tdep->vnq_type == NULL)
1816 {
1817 struct type *t;
1818 struct type *elem;
1819
1820 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1821 TYPE_CODE_UNION);
1822
1823 elem = builtin_type (gdbarch)->builtin_uint128;
1824 append_composite_type_field (t, "u", elem);
1825
1826 elem = builtin_type (gdbarch)->builtin_int128;
1827 append_composite_type_field (t, "s", elem);
1828
1829 tdep->vnq_type = t;
1830 }
1831
1832 return tdep->vnq_type;
1833}
1834
1835/* Return the type for an AdvSISD D register. */
1836
1837static struct type *
1838aarch64_vnd_type (struct gdbarch *gdbarch)
1839{
1840 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1841
1842 if (tdep->vnd_type == NULL)
1843 {
1844 struct type *t;
1845 struct type *elem;
1846
1847 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1848 TYPE_CODE_UNION);
1849
1850 elem = builtin_type (gdbarch)->builtin_double;
1851 append_composite_type_field (t, "f", elem);
1852
1853 elem = builtin_type (gdbarch)->builtin_uint64;
1854 append_composite_type_field (t, "u", elem);
1855
1856 elem = builtin_type (gdbarch)->builtin_int64;
1857 append_composite_type_field (t, "s", elem);
1858
1859 tdep->vnd_type = t;
1860 }
1861
1862 return tdep->vnd_type;
1863}
1864
1865/* Return the type for an AdvSISD S register. */
1866
1867static struct type *
1868aarch64_vns_type (struct gdbarch *gdbarch)
1869{
1870 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1871
1872 if (tdep->vns_type == NULL)
1873 {
1874 struct type *t;
1875 struct type *elem;
1876
1877 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1878 TYPE_CODE_UNION);
1879
1880 elem = builtin_type (gdbarch)->builtin_float;
1881 append_composite_type_field (t, "f", elem);
1882
1883 elem = builtin_type (gdbarch)->builtin_uint32;
1884 append_composite_type_field (t, "u", elem);
1885
1886 elem = builtin_type (gdbarch)->builtin_int32;
1887 append_composite_type_field (t, "s", elem);
1888
1889 tdep->vns_type = t;
1890 }
1891
1892 return tdep->vns_type;
1893}
1894
1895/* Return the type for an AdvSISD H register. */
1896
1897static struct type *
1898aarch64_vnh_type (struct gdbarch *gdbarch)
1899{
1900 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1901
1902 if (tdep->vnh_type == NULL)
1903 {
1904 struct type *t;
1905 struct type *elem;
1906
1907 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1908 TYPE_CODE_UNION);
1909
1910 elem = builtin_type (gdbarch)->builtin_uint16;
1911 append_composite_type_field (t, "u", elem);
1912
1913 elem = builtin_type (gdbarch)->builtin_int16;
1914 append_composite_type_field (t, "s", elem);
1915
1916 tdep->vnh_type = t;
1917 }
1918
1919 return tdep->vnh_type;
1920}
1921
1922/* Return the type for an AdvSISD B register. */
1923
1924static struct type *
1925aarch64_vnb_type (struct gdbarch *gdbarch)
1926{
1927 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1928
1929 if (tdep->vnb_type == NULL)
1930 {
1931 struct type *t;
1932 struct type *elem;
1933
1934 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1935 TYPE_CODE_UNION);
1936
1937 elem = builtin_type (gdbarch)->builtin_uint8;
1938 append_composite_type_field (t, "u", elem);
1939
1940 elem = builtin_type (gdbarch)->builtin_int8;
1941 append_composite_type_field (t, "s", elem);
1942
1943 tdep->vnb_type = t;
1944 }
1945
1946 return tdep->vnb_type;
1947}
1948
1949/* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1950
1951static int
1952aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1953{
1954 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1955 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1956
1957 if (reg == AARCH64_DWARF_SP)
1958 return AARCH64_SP_REGNUM;
1959
1960 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1961 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1962
1963 return -1;
1964}
1965\f
1966
/* Implement the "print_insn" gdbarch method.  */

static int
aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
{
  /* Clear any symbol context before handing off to the opcodes
     disassembler.  NOTE(review): assumed to prevent stale symbolic
     operand printing -- confirm against opcodes' print_insn_aarch64.  */
  info->symbols = NULL;
  return print_insn_aarch64 (memaddr, info);
}
1975
/* AArch64 BRK software debug mode instruction.
   Note that AArch64 code is always little-endian.
   1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
static const gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
07b287a0
MS
1980
1981/* Implement the "breakpoint_from_pc" gdbarch method. */
1982
948f8e3d 1983static const gdb_byte *
07b287a0
MS
1984aarch64_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
1985 int *lenptr)
1986{
1987 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1988
1989 *lenptr = sizeof (aarch64_default_breakpoint);
1990 return aarch64_default_breakpoint;
1991}
1992
/* Extract from an array REGS containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.  */

static void
aarch64_extract_return_value (struct type *type, struct regcache *regs,
			      gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      /* Scalar floating-point values come back in V0.  */
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
      memcpy (valbuf, buf, len);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_CODE (type) == TYPE_CODE_REF
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
	 straight-forward.  Otherwise we have to play around a bit
	 more.  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      ULONGEST tmp;

      while (len > 0)
	{
	  /* By using store_unsigned_integer we avoid having to do
	     anything special for small big-endian values.  */
	  regcache_cooked_read_unsigned (regs, regno++, &tmp);
	  store_unsigned_integer (valbuf,
				  (len > X_REGISTER_SIZE
				   ? X_REGISTER_SIZE : len), byte_order, tmp);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
  else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
    {
      /* Complex values: real part in V0, imaginary part in V1.  */
      int regno = AARCH64_V0_REGNUM;
      bfd_byte buf[V_REGISTER_SIZE];
      struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
      int len = TYPE_LENGTH (target_type);

      regcache_cooked_read (regs, regno, buf);
      memcpy (valbuf, buf, len);
      valbuf += len;
      regcache_cooked_read (regs, regno + 1, buf);
      memcpy (valbuf, buf, len);
      valbuf += len;
    }
  else if (is_hfa (type))
    {
      /* Homogeneous floating-point aggregate: one member per V
	 register, starting at V0.  */
      int elements = TYPE_NFIELDS (type);
      struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
      int len = TYPE_LENGTH (member_type);
      int i;

      for (i = 0; i < elements; i++)
	{
	  int regno = AARCH64_V0_REGNUM + i;
	  bfd_byte buf[X_REGISTER_SIZE];

	  if (aarch64_debug)
	    fprintf_unfiltered (gdb_stdlog,
				"read HFA return value element %d from %s\n",
				i + 1,
				gdbarch_register_name (gdbarch, regno));
	  regcache_cooked_read (regs, regno, buf);

	  memcpy (valbuf, buf, len);
	  valbuf += len;
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte buf[X_REGISTER_SIZE];

      while (len > 0)
	{
	  regcache_cooked_read (regs, regno++, buf);
	  memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
}
2093
2094
/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.

   Fix: the locals `nRc' and `code' were declared but never used;
   removed.  */

static int
aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  CHECK_TYPEDEF (type);

  /* In the AArch64 ABI, "integer" like aggregate types are returned
     in registers.  For an aggregate type to be integer like, its size
     must be less than or equal to 4 * X_REGISTER_SIZE.  */

  if (is_hfa (type))
    {
      /* PCS B.5 If the argument is a Named HFA, then the argument is
	 used unmodified.  */
      return 0;
    }

  if (TYPE_LENGTH (type) > 16)
    {
      /* PCS B.6 Aggregates larger than 16 bytes are passed by
	 invisible reference.  */

      return 1;
    }

  return 0;
}
2128
/* Write into appropriate registers a function return value of type
   TYPE, given in virtual format.  This is the inverse of
   aarch64_extract_return_value.  */

static void
aarch64_store_return_value (struct type *type, struct regcache *regs,
			    const gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      /* Scalar floating-point values go in V0.  */
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
      regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_CODE (type) == TYPE_CODE_REF
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
	{
	  /* Values of one word or less are zero/sign-extended and
	     returned in r0.  */
	  bfd_byte tmpbuf[X_REGISTER_SIZE];
	  LONGEST val = unpack_long (type, valbuf);

	  store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
	  regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
	}
      else
	{
	  /* Integral values greater than one word are stored in
	     consecutive registers starting with r0.  This will always
	     be a multiple of the register size.  */
	  int len = TYPE_LENGTH (type);
	  int regno = AARCH64_X0_REGNUM;

	  while (len > 0)
	    {
	      regcache_cooked_write (regs, regno++, valbuf);
	      len -= X_REGISTER_SIZE;
	      valbuf += X_REGISTER_SIZE;
	    }
	}
    }
  else if (is_hfa (type))
    {
      /* Homogeneous floating-point aggregate: one member per V
	 register, starting at V0.  */
      int elements = TYPE_NFIELDS (type);
      struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
      int len = TYPE_LENGTH (member_type);
      int i;

      for (i = 0; i < elements; i++)
	{
	  int regno = AARCH64_V0_REGNUM + i;
	  bfd_byte tmpbuf[MAX_REGISTER_SIZE];

	  if (aarch64_debug)
	    fprintf_unfiltered (gdb_stdlog,
				"write HFA return value element %d to %s\n",
				i + 1,
				gdbarch_register_name (gdbarch, regno));

	  memcpy (tmpbuf, valbuf, len);
	  regcache_cooked_write (regs, regno, tmpbuf);
	  valbuf += len;
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte tmpbuf[X_REGISTER_SIZE];

      while (len > 0)
	{
	  memcpy (tmpbuf, valbuf,
		  len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
	  regcache_cooked_write (regs, regno++, tmpbuf);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
}
2222
2223/* Implement the "return_value" gdbarch method. */
2224
2225static enum return_value_convention
2226aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2227 struct type *valtype, struct regcache *regcache,
2228 gdb_byte *readbuf, const gdb_byte *writebuf)
2229{
2230 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2231
2232 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2233 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2234 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2235 {
2236 if (aarch64_return_in_memory (gdbarch, valtype))
2237 {
2238 if (aarch64_debug)
2239 fprintf_unfiltered (gdb_stdlog, "return value in memory\n");
2240 return RETURN_VALUE_STRUCT_CONVENTION;
2241 }
2242 }
2243
2244 if (writebuf)
2245 aarch64_store_return_value (valtype, regcache, writebuf);
2246
2247 if (readbuf)
2248 aarch64_extract_return_value (valtype, regcache, readbuf);
2249
2250 if (aarch64_debug)
2251 fprintf_unfiltered (gdb_stdlog, "return value in registers\n");
2252
2253 return RETURN_VALUE_REGISTER_CONVENTION;
2254}
2255
2256/* Implement the "get_longjmp_target" gdbarch method. */
2257
2258static int
2259aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2260{
2261 CORE_ADDR jb_addr;
2262 gdb_byte buf[X_REGISTER_SIZE];
2263 struct gdbarch *gdbarch = get_frame_arch (frame);
2264 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2265 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2266
2267 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2268
2269 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2270 X_REGISTER_SIZE))
2271 return 0;
2272
2273 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2274 return 1;
2275}
2276\f
2277
2278/* Return the pseudo register name corresponding to register regnum. */
2279
2280static const char *
2281aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2282{
2283 static const char *const q_name[] =
2284 {
2285 "q0", "q1", "q2", "q3",
2286 "q4", "q5", "q6", "q7",
2287 "q8", "q9", "q10", "q11",
2288 "q12", "q13", "q14", "q15",
2289 "q16", "q17", "q18", "q19",
2290 "q20", "q21", "q22", "q23",
2291 "q24", "q25", "q26", "q27",
2292 "q28", "q29", "q30", "q31",
2293 };
2294
2295 static const char *const d_name[] =
2296 {
2297 "d0", "d1", "d2", "d3",
2298 "d4", "d5", "d6", "d7",
2299 "d8", "d9", "d10", "d11",
2300 "d12", "d13", "d14", "d15",
2301 "d16", "d17", "d18", "d19",
2302 "d20", "d21", "d22", "d23",
2303 "d24", "d25", "d26", "d27",
2304 "d28", "d29", "d30", "d31",
2305 };
2306
2307 static const char *const s_name[] =
2308 {
2309 "s0", "s1", "s2", "s3",
2310 "s4", "s5", "s6", "s7",
2311 "s8", "s9", "s10", "s11",
2312 "s12", "s13", "s14", "s15",
2313 "s16", "s17", "s18", "s19",
2314 "s20", "s21", "s22", "s23",
2315 "s24", "s25", "s26", "s27",
2316 "s28", "s29", "s30", "s31",
2317 };
2318
2319 static const char *const h_name[] =
2320 {
2321 "h0", "h1", "h2", "h3",
2322 "h4", "h5", "h6", "h7",
2323 "h8", "h9", "h10", "h11",
2324 "h12", "h13", "h14", "h15",
2325 "h16", "h17", "h18", "h19",
2326 "h20", "h21", "h22", "h23",
2327 "h24", "h25", "h26", "h27",
2328 "h28", "h29", "h30", "h31",
2329 };
2330
2331 static const char *const b_name[] =
2332 {
2333 "b0", "b1", "b2", "b3",
2334 "b4", "b5", "b6", "b7",
2335 "b8", "b9", "b10", "b11",
2336 "b12", "b13", "b14", "b15",
2337 "b16", "b17", "b18", "b19",
2338 "b20", "b21", "b22", "b23",
2339 "b24", "b25", "b26", "b27",
2340 "b28", "b29", "b30", "b31",
2341 };
2342
2343 regnum -= gdbarch_num_regs (gdbarch);
2344
2345 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2346 return q_name[regnum - AARCH64_Q0_REGNUM];
2347
2348 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2349 return d_name[regnum - AARCH64_D0_REGNUM];
2350
2351 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2352 return s_name[regnum - AARCH64_S0_REGNUM];
2353
2354 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2355 return h_name[regnum - AARCH64_H0_REGNUM];
2356
2357 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2358 return b_name[regnum - AARCH64_B0_REGNUM];
2359
2360 internal_error (__FILE__, __LINE__,
2361 _("aarch64_pseudo_register_name: bad register number %d"),
2362 regnum);
2363}
2364
2365/* Implement the "pseudo_register_type" tdesc_arch_data method. */
2366
2367static struct type *
2368aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2369{
2370 regnum -= gdbarch_num_regs (gdbarch);
2371
2372 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2373 return aarch64_vnq_type (gdbarch);
2374
2375 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2376 return aarch64_vnd_type (gdbarch);
2377
2378 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2379 return aarch64_vns_type (gdbarch);
2380
2381 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2382 return aarch64_vnh_type (gdbarch);
2383
2384 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2385 return aarch64_vnb_type (gdbarch);
2386
2387 internal_error (__FILE__, __LINE__,
2388 _("aarch64_pseudo_register_type: bad register number %d"),
2389 regnum);
2390}
2391
2392/* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2393
2394static int
2395aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2396 struct reggroup *group)
2397{
2398 regnum -= gdbarch_num_regs (gdbarch);
2399
2400 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2401 return group == all_reggroup || group == vector_reggroup;
2402 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2403 return (group == all_reggroup || group == vector_reggroup
2404 || group == float_reggroup);
2405 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2406 return (group == all_reggroup || group == vector_reggroup
2407 || group == float_reggroup);
2408 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2409 return group == all_reggroup || group == vector_reggroup;
2410 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2411 return group == all_reggroup || group == vector_reggroup;
2412
2413 return group == all_reggroup;
2414}
2415
2416/* Implement the "pseudo_register_read_value" gdbarch method. */
2417
2418static struct value *
2419aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2420 struct regcache *regcache,
2421 int regnum)
2422{
2423 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2424 struct value *result_value;
2425 gdb_byte *buf;
2426
2427 result_value = allocate_value (register_type (gdbarch, regnum));
2428 VALUE_LVAL (result_value) = lval_register;
2429 VALUE_REGNUM (result_value) = regnum;
2430 buf = value_contents_raw (result_value);
2431
2432 regnum -= gdbarch_num_regs (gdbarch);
2433
2434 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2435 {
2436 enum register_status status;
2437 unsigned v_regnum;
2438
2439 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2440 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2441 if (status != REG_VALID)
2442 mark_value_bytes_unavailable (result_value, 0,
2443 TYPE_LENGTH (value_type (result_value)));
2444 else
2445 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2446 return result_value;
2447 }
2448
2449 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2450 {
2451 enum register_status status;
2452 unsigned v_regnum;
2453
2454 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2455 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2456 if (status != REG_VALID)
2457 mark_value_bytes_unavailable (result_value, 0,
2458 TYPE_LENGTH (value_type (result_value)));
2459 else
2460 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2461 return result_value;
2462 }
2463
2464 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2465 {
2466 enum register_status status;
2467 unsigned v_regnum;
2468
2469 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2470 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2471 memcpy (buf, reg_buf, S_REGISTER_SIZE);
2472 return result_value;
2473 }
2474
2475 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2476 {
2477 enum register_status status;
2478 unsigned v_regnum;
2479
2480 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2481 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2482 if (status != REG_VALID)
2483 mark_value_bytes_unavailable (result_value, 0,
2484 TYPE_LENGTH (value_type (result_value)));
2485 else
2486 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2487 return result_value;
2488 }
2489
2490 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2491 {
2492 enum register_status status;
2493 unsigned v_regnum;
2494
2495 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2496 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2497 if (status != REG_VALID)
2498 mark_value_bytes_unavailable (result_value, 0,
2499 TYPE_LENGTH (value_type (result_value)));
2500 else
2501 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2502 return result_value;
2503 }
2504
2505 gdb_assert_not_reached ("regnum out of bound");
2506}
2507
2508/* Implement the "pseudo_register_write" gdbarch method. */
2509
2510static void
2511aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2512 int regnum, const gdb_byte *buf)
2513{
2514 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2515
2516 /* Ensure the register buffer is zero, we want gdb writes of the
2517 various 'scalar' pseudo registers to behavior like architectural
2518 writes, register width bytes are written the remainder are set to
2519 zero. */
2520 memset (reg_buf, 0, sizeof (reg_buf));
2521
2522 regnum -= gdbarch_num_regs (gdbarch);
2523
2524 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2525 {
2526 /* pseudo Q registers */
2527 unsigned v_regnum;
2528
2529 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2530 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2531 regcache_raw_write (regcache, v_regnum, reg_buf);
2532 return;
2533 }
2534
2535 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2536 {
2537 /* pseudo D registers */
2538 unsigned v_regnum;
2539
2540 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2541 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2542 regcache_raw_write (regcache, v_regnum, reg_buf);
2543 return;
2544 }
2545
2546 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2547 {
2548 unsigned v_regnum;
2549
2550 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2551 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2552 regcache_raw_write (regcache, v_regnum, reg_buf);
2553 return;
2554 }
2555
2556 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2557 {
2558 /* pseudo H registers */
2559 unsigned v_regnum;
2560
2561 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2562 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2563 regcache_raw_write (regcache, v_regnum, reg_buf);
2564 return;
2565 }
2566
2567 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2568 {
2569 /* pseudo B registers */
2570 unsigned v_regnum;
2571
2572 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2573 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2574 regcache_raw_write (regcache, v_regnum, reg_buf);
2575 return;
2576 }
2577
2578 gdb_assert_not_reached ("regnum out of bound");
2579}
2580
07b287a0
MS
/* Callback function for user_reg_add.  BATON points at the register
   number of the alias; return the value of that register in FRAME.  */

static struct value *
value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
{
  const int *regnum_p = baton;

  return value_of_register (*regnum_p, frame);
}
2590\f
2591
9404b58f
KM
/* Implement the "software_single_step" gdbarch method, needed to
   single step through atomic sequences on AArch64.

   A load-exclusive/store-exclusive sequence must not be interrupted
   by a breakpoint, or the exclusive monitor is cleared and the
   sequence can never succeed.  Scan forward from the current PC for
   such a sequence and place breakpoints only past its end (and at
   the target of at most one conditional branch inside it).  Return 1
   if breakpoints were placed, 0 to fall back to normal stepping.  */

static int
aarch64_software_single_step (struct frame_info *frame)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct address_space *aspace = get_frame_address_space (frame);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  const int insn_size = 4;
  const int atomic_sequence_length = 16; /* Instruction sequence length.  */
  CORE_ADDR pc = get_frame_pc (frame);
  CORE_ADDR breaks[2] = { -1, -1 };
  CORE_ADDR loc = pc;
  CORE_ADDR closing_insn = 0;
  uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
						byte_order_for_code);
  int index;
  int insn_count;
  int bc_insn_count = 0; /* Conditional branch instruction count.  */
  int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed).  */

  /* Look for a Load Exclusive instruction which begins the sequence.  */
  if (!decode_masked_match (insn, 0x3fc00000, 0x08400000))
    return 0;

  for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
    {
      int32_t offset;
      unsigned cond;

      loc += insn_size;
      insn = read_memory_unsigned_integer (loc, insn_size,
					   byte_order_for_code);

      /* Check if the instruction is a conditional branch.  */
      if (decode_bcond (loc, insn, &cond, &offset))
	{
	  /* More than one conditional branch in the sequence is not
	     supported; give up and fall back.  */
	  if (bc_insn_count >= 1)
	    return 0;

	  /* It is, so we'll try to set a breakpoint at the destination.  */
	  breaks[1] = loc + offset;

	  bc_insn_count++;
	  last_breakpoint++;
	}

      /* Look for the Store Exclusive which closes the atomic sequence.  */
      if (decode_masked_match (insn, 0x3fc00000, 0x08000000))
	{
	  closing_insn = loc;
	  break;
	}
    }

  /* We didn't find a closing Store Exclusive instruction, fall back.  */
  if (!closing_insn)
    return 0;

  /* Insert breakpoint after the end of the atomic sequence.  */
  breaks[0] = loc + insn_size;

  /* Check for duplicated breakpoints, and also check that the second
     breakpoint is not within the atomic sequence.  */
  if (last_breakpoint
      && (breaks[1] == breaks[0]
	  || (breaks[1] >= pc && breaks[1] <= closing_insn)))
    last_breakpoint = 0;

  /* Insert the breakpoint at the end of the sequence, and one at the
     destination of the conditional branch, if it exists.  */
  for (index = 0; index <= last_breakpoint; index++)
    insert_single_step_breakpoint (gdbarch, aspace, breaks[index]);

  return 1;
}
2669
07b287a0
MS
/* Initialize the current architecture based on INFO.  If possible,
   re-use an architecture from ARCHES, which is a list of
   architectures already created during this debugging session.

   Called e.g. at program startup, when reading a core file, and when
   reading a binary file.  */

static struct gdbarch *
aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
{
  struct gdbarch_tdep *tdep;
  struct gdbarch *gdbarch;
  struct gdbarch_list *best_arch;
  struct tdesc_arch_data *tdesc_data = NULL;
  const struct target_desc *tdesc = info.target_desc;
  int i;
  /* NOTE(review): have_fpa_registers appears to be unused in this
     function.  */
  int have_fpa_registers = 1;
  int valid_p = 1;
  const struct tdesc_feature *feature;
  int num_regs = 0;
  int num_pseudo_regs = 0;

  /* Ensure we always have a target descriptor.  */
  if (!tdesc_has_registers (tdesc))
    tdesc = tdesc_aarch64;

  gdb_assert (tdesc);

  feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");

  if (feature == NULL)
    return NULL;

  tdesc_data = tdesc_data_alloc ();

  /* Validate the descriptor provides the mandatory core R registers
     and allocate their numbers.  */
  for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
    valid_p &=
      tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
			       aarch64_r_register_names[i]);

  num_regs = AARCH64_X0_REGNUM + i;

  /* Look for the V registers.  */
  feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
  if (feature)
    {
      /* Validate the descriptor provides the mandatory V registers
         and allocate their numbers.  */
      for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
	valid_p &=
	  tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
				   aarch64_v_register_names[i]);

      num_regs = AARCH64_V0_REGNUM + i;

      num_pseudo_regs += 32;	/* add the Qn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Dn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Sn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Hn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Bn scalar register pseudos */
    }

  if (!valid_p)
    {
      tdesc_data_cleanup (tdesc_data);
      return NULL;
    }

  /* AArch64 code is always little-endian.  */
  info.byte_order_for_code = BFD_ENDIAN_LITTLE;

  /* If there is already a candidate, use it.  */
  for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
       best_arch != NULL;
       best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
    {
      /* Found a match.  */
      break;
    }

  if (best_arch != NULL)
    {
      if (tdesc_data != NULL)
	tdesc_data_cleanup (tdesc_data);
      return best_arch->gdbarch;
    }

  /* No re-usable architecture: build a new one.  */
  tdep = xcalloc (1, sizeof (struct gdbarch_tdep));
  gdbarch = gdbarch_alloc (&info, tdep);

  /* This should be low enough for everything.  */
  tdep->lowest_pc = 0x20;
  tdep->jb_pc = -1;		/* Longjump support not enabled by default.  */
  tdep->jb_elt_size = 8;

  set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, aarch64_frame_align);

  /* Frame handling.  */
  set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
  set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
  set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);

  /* Advance PC across function entry code.  */
  set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);

  /* The stack grows downward.  */
  set_gdbarch_inner_than (gdbarch, core_addr_lessthan);

  /* Breakpoint manipulation.  */
  set_gdbarch_breakpoint_from_pc (gdbarch, aarch64_breakpoint_from_pc);
  set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
  set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);

  /* Information about registers, etc.  */
  set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
  set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
  set_gdbarch_num_regs (gdbarch, num_regs);

  set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
  set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
  set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
  set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
  set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
  set_tdesc_pseudo_register_reggroup_p (gdbarch,
					aarch64_pseudo_register_reggroup_p);

  /* ABI */
  set_gdbarch_short_bit (gdbarch, 16);
  set_gdbarch_int_bit (gdbarch, 32);
  set_gdbarch_float_bit (gdbarch, 32);
  set_gdbarch_double_bit (gdbarch, 64);
  set_gdbarch_long_double_bit (gdbarch, 128);
  set_gdbarch_long_bit (gdbarch, 64);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_ptr_bit (gdbarch, 64);
  set_gdbarch_char_signed (gdbarch, 0);
  set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
  set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
  set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);

  /* Internal <-> external register number maps.  */
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);

  /* Returning results.  */
  set_gdbarch_return_value (gdbarch, aarch64_return_value);

  /* Disassembly.  */
  set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);

  /* Virtual tables.  */
  set_gdbarch_vbit_in_delta (gdbarch, 1);

  /* Hook in the ABI-specific overrides, if they have been registered.  */
  info.target_desc = tdesc;
  info.tdep_info = (void *) tdesc_data;
  gdbarch_init_osabi (info, gdbarch);

  dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);

  /* Add some default predicates.  */
  frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
  dwarf2_append_unwinders (gdbarch);
  frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);

  frame_base_set_default (gdbarch, &aarch64_normal_base);

  /* Now we have tuned the configuration, set a few final things,
     based on what the OS ABI has told us.  */

  if (tdep->jb_pc >= 0)
    set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);

  tdesc_use_registers (gdbarch, tdesc, tdesc_data);

  /* Add standard register aliases.  */
  for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
    user_reg_add (gdbarch, aarch64_register_aliases[i].name,
		  value_of_aarch64_user_reg,
		  &aarch64_register_aliases[i].regnum);

  return gdbarch;
}
2855
2856static void
2857aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
2858{
2859 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2860
2861 if (tdep == NULL)
2862 return;
2863
2864 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
2865 paddress (gdbarch, tdep->lowest_pc));
2866}
2867
/* Suppress warning from -Wmissing-prototypes.  */
extern initialize_file_ftype _initialize_aarch64_tdep;

/* Module initializer: register the AArch64 architecture with GDB's
   gdbarch framework, load the builtin target description, and add
   the "set/show debug aarch64" maintenance commands.  */

void
_initialize_aarch64_tdep (void)
{
  gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
		    aarch64_dump_tdep);

  initialize_tdesc_aarch64 ();

  /* Debug this file's internals.  */
  add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
Set AArch64 debugging."), _("\
Show AArch64 debugging."), _("\
When on, AArch64 specific debugging is enabled."),
			   NULL,
			   show_aarch64_debug,
			   &setdebuglist, &showdebuglist);
}
99afc88b
OJ
2888
/* AArch64 process record-replay related structures, defines etc.  */

/* Bit-field helpers: submask(X) builds a mask of the low X+1 bits,
   bit(OBJ,ST) extracts a single bit, and bits(OBJ,ST,FN) extracts
   the inclusive bit range [ST, FN] of OBJ.  */
#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))

/* Allocate a LENGTH-element uint32_t array and copy the register
   numbers in RECORD_BUF into it, assigning the result to REGS.
   No-op when LENGTH is zero.  The caller owns (and must free) the
   allocation.  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int reg_len = LENGTH; \
            if (reg_len) \
              { \
                REGS = XNEWVEC (uint32_t, reg_len); \
                memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
              } \
          } \
        while (0)

/* Allocate a LENGTH-element aarch64_mem_r array and copy RECORD_BUF
   into it, assigning the result to MEMS.  No-op when LENGTH is zero.
   The caller owns (and must free) the allocation.
   NOTE(review): the destination is written as &MEMS->len, the address
   of the first member, which is the same address as MEMS itself —
   presumably intentional but worth confirming.  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int mem_len = LENGTH; \
            if (mem_len) \
            { \
              MEMS =  XNEWVEC (struct aarch64_mem_r, mem_len);  \
              memcpy(&MEMS->len, &RECORD_BUF[0], \
                     sizeof(struct aarch64_mem_r) * LENGTH); \
            } \
          } \
          while (0)

/* AArch64 record/replay structures and enumerations.  */

/* One recorded memory access: LEN bytes at ADDR.  */
struct aarch64_mem_r
{
  uint64_t len;    /* Record length.  */
  uint64_t addr;   /* Memory address.  */
};

/* Result codes returned by the per-instruction record handlers.  */
enum aarch64_record_result
{
  AARCH64_RECORD_SUCCESS,
  AARCH64_RECORD_FAILURE,
  AARCH64_RECORD_UNSUPPORTED,
  AARCH64_RECORD_UNKNOWN
};

/* Working state passed to every record handler: the instruction
   being decoded plus the register/memory records accumulated so
   far.  */
typedef struct insn_decode_record_t
{
  struct gdbarch *gdbarch;
  struct regcache *regcache;
  CORE_ADDR this_addr;                 /* Address of insn to be recorded.  */
  uint32_t aarch64_insn;               /* Insn to be recorded.  */
  uint32_t mem_rec_count;              /* Count of memory records.  */
  uint32_t reg_rec_count;              /* Count of register records.  */
  uint32_t *aarch64_regs;              /* Registers to be recorded.  */
  struct aarch64_mem_r *aarch64_mems;  /* Memory locations to be recorded.  */
} insn_decode_record;
2947
/* Record handler for data processing - register instructions.

   Decode which destination registers (and possibly CPSR, for the
   flag-setting variants) the instruction writes, and record them for
   replay.  Returns AARCH64_RECORD_SUCCESS or
   AARCH64_RECORD_UNKNOWN.  */

static unsigned int
aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
{
  uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
  uint32_t record_buf[4];

  /* Destination register and the opcode fields that select the
     instruction class.  */
  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);

  if (!bit (aarch64_insn_r->aarch64_insn, 28))
    {
      uint8_t setflags;

      /* Logical (shifted register).  */
      if (insn_bits24_27 == 0x0a)
	setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
      /* Add/subtract.  */
      else if (insn_bits24_27 == 0x0b)
	setflags = bit (aarch64_insn_r->aarch64_insn, 29);
      else
	return AARCH64_RECORD_UNKNOWN;

      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else
    {
      if (insn_bits24_27 == 0x0b)
	{
	  /* Data-processing (3 source).  */
	  record_buf[0] = reg_rd;
	  aarch64_insn_r->reg_rec_count = 1;
	}
      else if (insn_bits24_27 == 0x0a)
	{
	  if (insn_bits21_23 == 0x00)
	    {
	      /* Add/subtract (with carry).  */
	      record_buf[0] = reg_rd;
	      aarch64_insn_r->reg_rec_count = 1;
	      if (bit (aarch64_insn_r->aarch64_insn, 29))
		{
		  record_buf[1] = AARCH64_CPSR_REGNUM;
		  aarch64_insn_r->reg_rec_count = 2;
		}
	    }
	  else if (insn_bits21_23 == 0x02)
	    {
	      /* Conditional compare (register) and conditional compare
		 (immediate) instructions.  */
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
	    {
	      /* Conditional select.  */
	      /* Data-processing (2 source).  */
	      /* Data-processing (1 source).  */
	      record_buf[0] = reg_rd;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  else
	    return AARCH64_RECORD_UNKNOWN;
	}
    }

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3023
3024/* Record handler for data processing - immediate instructions. */
3025
3026static unsigned int
3027aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3028{
3029 uint8_t reg_rd, insn_bit28, insn_bit23, insn_bits24_27, setflags;
3030 uint32_t record_buf[4];
3031
3032 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3033 insn_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3034 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3035 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3036
3037 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3038 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3039 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3040 {
3041 record_buf[0] = reg_rd;
3042 aarch64_insn_r->reg_rec_count = 1;
3043 }
3044 else if (insn_bits24_27 == 0x01)
3045 {
3046 /* Add/Subtract (immediate). */
3047 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3048 record_buf[0] = reg_rd;
3049 aarch64_insn_r->reg_rec_count = 1;
3050 if (setflags)
3051 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3052 }
3053 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3054 {
3055 /* Logical (immediate). */
3056 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3057 record_buf[0] = reg_rd;
3058 aarch64_insn_r->reg_rec_count = 1;
3059 if (setflags)
3060 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3061 }
3062 else
3063 return AARCH64_RECORD_UNKNOWN;
3064
3065 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3066 record_buf);
3067 return AARCH64_RECORD_SUCCESS;
3068}
3069
/* Record handler for branch, exception generation and system
   instructions.

   Records PC (and LR for branch-with-link forms) for branches, CPSR
   or Rt for system instructions, and delegates SVC to the OS-ABI
   syscall record hook.  Returns one of the AARCH64_RECORD_* codes.  */

static unsigned int
aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
  uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
  uint32_t record_buf[4];

  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
  insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);

  if (insn_bits28_31 == 0x0d)
    {
      /* Exception generation instructions. */
      if (insn_bits24_27 == 0x04)
	{
	  /* Only SVC is recordable; it is delegated to the OS-ABI
	     layer.  */
	  if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
	      && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
	      && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
	    {
	      ULONGEST svc_number;

	      /* Register 8 (x8) is read for the syscall number —
		 presumably per the Linux AArch64 syscall convention;
		 confirm against the OS-ABI record hook.  */
	      regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
					  &svc_number);
	      return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
						   svc_number);
	    }
	  else
	    return AARCH64_RECORD_UNSUPPORTED;
	}
      /* System instructions. */
      else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
	{
	  uint32_t reg_rt, reg_crn;

	  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
	  reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);

	  /* Record rt in case of sysl and mrs instructions. */
	  if (bit (aarch64_insn_r->aarch64_insn, 21))
	    {
	      record_buf[0] = reg_rt;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  /* Record cpsr for hint and msr(immediate) instructions. */
	  else if (reg_crn == 0x02 || reg_crn == 0x04)
	    {
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	}
      /* Unconditional branch (register). */
      else if((insn_bits24_27 & 0x0e) == 0x06)
	{
	  record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
	  /* BLR also writes the link register.  */
	  if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
	    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
	}
      else
	return AARCH64_RECORD_UNKNOWN;
    }
  /* Unconditional branch (immediate). */
  else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
    {
      record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
      /* BL (bit 31 set) also writes the link register.  */
      if (bit (aarch64_insn_r->aarch64_insn, 31))
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
    }
  else
    /* Compare & branch (immediate), Test & branch (immediate) and
       Conditional branch (immediate). */
    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3149
3150/* Record handler for advanced SIMD load and store instructions. */
3151
3152static unsigned int
3153aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3154{
3155 CORE_ADDR address;
3156 uint64_t addr_offset = 0;
3157 uint32_t record_buf[24];
3158 uint64_t record_buf_mem[24];
3159 uint32_t reg_rn, reg_rt;
3160 uint32_t reg_index = 0, mem_index = 0;
3161 uint8_t opcode_bits, size_bits;
3162
3163 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3164 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3165 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3166 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3167 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3168
3169 if (record_debug)
3170 {
3171 fprintf_unfiltered (gdb_stdlog,
3172 "Process record: Advanced SIMD load/store\n");
3173 }
3174
3175 /* Load/store single structure. */
3176 if (bit (aarch64_insn_r->aarch64_insn, 24))
3177 {
3178 uint8_t sindex, scale, selem, esize, replicate = 0;
3179 scale = opcode_bits >> 2;
3180 selem = ((opcode_bits & 0x02) |
3181 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3182 switch (scale)
3183 {
3184 case 1:
3185 if (size_bits & 0x01)
3186 return AARCH64_RECORD_UNKNOWN;
3187 break;
3188 case 2:
3189 if ((size_bits >> 1) & 0x01)
3190 return AARCH64_RECORD_UNKNOWN;
3191 if (size_bits & 0x01)
3192 {
3193 if (!((opcode_bits >> 1) & 0x01))
3194 scale = 3;
3195 else
3196 return AARCH64_RECORD_UNKNOWN;
3197 }
3198 break;
3199 case 3:
3200 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3201 {
3202 scale = size_bits;
3203 replicate = 1;
3204 break;
3205 }
3206 else
3207 return AARCH64_RECORD_UNKNOWN;
3208 default:
3209 break;
3210 }
3211 esize = 8 << scale;
3212 if (replicate)
3213 for (sindex = 0; sindex < selem; sindex++)
3214 {
3215 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3216 reg_rt = (reg_rt + 1) % 32;
3217 }
3218 else
3219 {
3220 for (sindex = 0; sindex < selem; sindex++)
3221 if (bit (aarch64_insn_r->aarch64_insn, 22))
3222 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3223 else
3224 {
3225 record_buf_mem[mem_index++] = esize / 8;
3226 record_buf_mem[mem_index++] = address + addr_offset;
3227 }
3228 addr_offset = addr_offset + (esize / 8);
3229 reg_rt = (reg_rt + 1) % 32;
3230 }
3231 }
3232 /* Load/store multiple structure. */
3233 else
3234 {
3235 uint8_t selem, esize, rpt, elements;
3236 uint8_t eindex, rindex;
3237
3238 esize = 8 << size_bits;
3239 if (bit (aarch64_insn_r->aarch64_insn, 30))
3240 elements = 128 / esize;
3241 else
3242 elements = 64 / esize;
3243
3244 switch (opcode_bits)
3245 {
3246 /*LD/ST4 (4 Registers). */
3247 case 0:
3248 rpt = 1;
3249 selem = 4;
3250 break;
3251 /*LD/ST1 (4 Registers). */
3252 case 2:
3253 rpt = 4;
3254 selem = 1;
3255 break;
3256 /*LD/ST3 (3 Registers). */
3257 case 4:
3258 rpt = 1;
3259 selem = 3;
3260 break;
3261 /*LD/ST1 (3 Registers). */
3262 case 6:
3263 rpt = 3;
3264 selem = 1;
3265 break;
3266 /*LD/ST1 (1 Register). */
3267 case 7:
3268 rpt = 1;
3269 selem = 1;
3270 break;
3271 /*LD/ST2 (2 Registers). */
3272 case 8:
3273 rpt = 1;
3274 selem = 2;
3275 break;
3276 /*LD/ST1 (2 Registers). */
3277 case 10:
3278 rpt = 2;
3279 selem = 1;
3280 break;
3281 default:
3282 return AARCH64_RECORD_UNSUPPORTED;
3283 break;
3284 }
3285 for (rindex = 0; rindex < rpt; rindex++)
3286 for (eindex = 0; eindex < elements; eindex++)
3287 {
3288 uint8_t reg_tt, sindex;
3289 reg_tt = (reg_rt + rindex) % 32;
3290 for (sindex = 0; sindex < selem; sindex++)
3291 {
3292 if (bit (aarch64_insn_r->aarch64_insn, 22))
3293 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3294 else
3295 {
3296 record_buf_mem[mem_index++] = esize / 8;
3297 record_buf_mem[mem_index++] = address + addr_offset;
3298 }
3299 addr_offset = addr_offset + (esize / 8);
3300 reg_tt = (reg_tt + 1) % 32;
3301 }
3302 }
3303 }
3304
3305 if (bit (aarch64_insn_r->aarch64_insn, 23))
3306 record_buf[reg_index++] = reg_rn;
3307
3308 aarch64_insn_r->reg_rec_count = reg_index;
3309 aarch64_insn_r->mem_rec_count = mem_index / 2;
3310 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3311 record_buf_mem);
3312 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3313 record_buf);
3314 return AARCH64_RECORD_SUCCESS;
3315}
3316
3317/* Record handler for load and store instructions. */
3318
3319static unsigned int
3320aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3321{
3322 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3323 uint8_t insn_bit23, insn_bit21;
3324 uint8_t opc, size_bits, ld_flag, vector_flag;
3325 uint32_t reg_rn, reg_rt, reg_rt2;
3326 uint64_t datasize, offset;
3327 uint32_t record_buf[8];
3328 uint64_t record_buf_mem[8];
3329 CORE_ADDR address;
3330
3331 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3332 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3333 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3334 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3335 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3336 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3337 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3338 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3339 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3340 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3341 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3342
3343 /* Load/store exclusive. */
3344 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3345 {
3346 if (record_debug)
3347 {
3348 fprintf_unfiltered (gdb_stdlog,
3349 "Process record: load/store exclusive\n");
3350 }
3351
3352 if (ld_flag)
3353 {
3354 record_buf[0] = reg_rt;
3355 aarch64_insn_r->reg_rec_count = 1;
3356 if (insn_bit21)
3357 {
3358 record_buf[1] = reg_rt2;
3359 aarch64_insn_r->reg_rec_count = 2;
3360 }
3361 }
3362 else
3363 {
3364 if (insn_bit21)
3365 datasize = (8 << size_bits) * 2;
3366 else
3367 datasize = (8 << size_bits);
3368 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3369 &address);
3370 record_buf_mem[0] = datasize / 8;
3371 record_buf_mem[1] = address;
3372 aarch64_insn_r->mem_rec_count = 1;
3373 if (!insn_bit23)
3374 {
3375 /* Save register rs. */
3376 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3377 aarch64_insn_r->reg_rec_count = 1;
3378 }
3379 }
3380 }
3381 /* Load register (literal) instructions decoding. */
3382 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3383 {
3384 if (record_debug)
3385 {
3386 fprintf_unfiltered (gdb_stdlog,
3387 "Process record: load register (literal)\n");
3388 }
3389 if (vector_flag)
3390 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3391 else
3392 record_buf[0] = reg_rt;
3393 aarch64_insn_r->reg_rec_count = 1;
3394 }
3395 /* All types of load/store pair instructions decoding. */
3396 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3397 {
3398 if (record_debug)
3399 {
3400 fprintf_unfiltered (gdb_stdlog,
3401 "Process record: load/store pair\n");
3402 }
3403
3404 if (ld_flag)
3405 {
3406 if (vector_flag)
3407 {
3408 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3409 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3410 }
3411 else
3412 {
3413 record_buf[0] = reg_rt;
3414 record_buf[1] = reg_rt2;
3415 }
3416 aarch64_insn_r->reg_rec_count = 2;
3417 }
3418 else
3419 {
3420 uint16_t imm7_off;
3421 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3422 if (!vector_flag)
3423 size_bits = size_bits >> 1;
3424 datasize = 8 << (2 + size_bits);
3425 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3426 offset = offset << (2 + size_bits);
3427 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3428 &address);
3429 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3430 {
3431 if (imm7_off & 0x40)
3432 address = address - offset;
3433 else
3434 address = address + offset;
3435 }
3436
3437 record_buf_mem[0] = datasize / 8;
3438 record_buf_mem[1] = address;
3439 record_buf_mem[2] = datasize / 8;
3440 record_buf_mem[3] = address + (datasize / 8);
3441 aarch64_insn_r->mem_rec_count = 2;
3442 }
3443 if (bit (aarch64_insn_r->aarch64_insn, 23))
3444 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3445 }
3446 /* Load/store register (unsigned immediate) instructions. */
3447 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3448 {
3449 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3450 if (!(opc >> 1))
3451 if (opc & 0x01)
3452 ld_flag = 0x01;
3453 else
3454 ld_flag = 0x0;
3455 else
3456 if (size_bits != 0x03)
3457 ld_flag = 0x01;
3458 else
3459 return AARCH64_RECORD_UNKNOWN;
3460
3461 if (record_debug)
3462 {
3463 fprintf_unfiltered (gdb_stdlog,
3464 "Process record: load/store (unsigned immediate):"
3465 " size %x V %d opc %x\n", size_bits, vector_flag,
3466 opc);
3467 }
3468
3469 if (!ld_flag)
3470 {
3471 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3472 datasize = 8 << size_bits;
3473 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3474 &address);
3475 offset = offset << size_bits;
3476 address = address + offset;
3477
3478 record_buf_mem[0] = datasize >> 3;
3479 record_buf_mem[1] = address;
3480 aarch64_insn_r->mem_rec_count = 1;
3481 }
3482 else
3483 {
3484 if (vector_flag)
3485 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3486 else
3487 record_buf[0] = reg_rt;
3488 aarch64_insn_r->reg_rec_count = 1;
3489 }
3490 }
3491 /* Load/store register (register offset) instructions. */
5d98d3cd
YQ
3492 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3493 && insn_bits10_11 == 0x02 && insn_bit21)
99afc88b
OJ
3494 {
3495 if (record_debug)
3496 {
3497 fprintf_unfiltered (gdb_stdlog,
3498 "Process record: load/store (register offset)\n");
3499 }
3500 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3501 if (!(opc >> 1))
3502 if (opc & 0x01)
3503 ld_flag = 0x01;
3504 else
3505 ld_flag = 0x0;
3506 else
3507 if (size_bits != 0x03)
3508 ld_flag = 0x01;
3509 else
3510 return AARCH64_RECORD_UNKNOWN;
3511
3512 if (!ld_flag)
3513 {
3514 uint64_t reg_rm_val;
3515 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3516 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
3517 if (bit (aarch64_insn_r->aarch64_insn, 12))
3518 offset = reg_rm_val << size_bits;
3519 else
3520 offset = reg_rm_val;
3521 datasize = 8 << size_bits;
3522 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3523 &address);
3524 address = address + offset;
3525 record_buf_mem[0] = datasize >> 3;
3526 record_buf_mem[1] = address;
3527 aarch64_insn_r->mem_rec_count = 1;
3528 }
3529 else
3530 {
3531 if (vector_flag)
3532 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3533 else
3534 record_buf[0] = reg_rt;
3535 aarch64_insn_r->reg_rec_count = 1;
3536 }
3537 }
3538 /* Load/store register (immediate and unprivileged) instructions. */
5d98d3cd
YQ
3539 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3540 && !insn_bit21)
99afc88b
OJ
3541 {
3542 if (record_debug)
3543 {
3544 fprintf_unfiltered (gdb_stdlog,
3545 "Process record: load/store (immediate and unprivileged)\n");
3546 }
3547 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3548 if (!(opc >> 1))
3549 if (opc & 0x01)
3550 ld_flag = 0x01;
3551 else
3552 ld_flag = 0x0;
3553 else
3554 if (size_bits != 0x03)
3555 ld_flag = 0x01;
3556 else
3557 return AARCH64_RECORD_UNKNOWN;
3558
3559 if (!ld_flag)
3560 {
3561 uint16_t imm9_off;
3562 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
3563 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
3564 datasize = 8 << size_bits;
3565 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3566 &address);
3567 if (insn_bits10_11 != 0x01)
3568 {
3569 if (imm9_off & 0x0100)
3570 address = address - offset;
3571 else
3572 address = address + offset;
3573 }
3574 record_buf_mem[0] = datasize >> 3;
3575 record_buf_mem[1] = address;
3576 aarch64_insn_r->mem_rec_count = 1;
3577 }
3578 else
3579 {
3580 if (vector_flag)
3581 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3582 else
3583 record_buf[0] = reg_rt;
3584 aarch64_insn_r->reg_rec_count = 1;
3585 }
3586 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
3587 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3588 }
3589 /* Advanced SIMD load/store instructions. */
3590 else
3591 return aarch64_record_asimd_load_store (aarch64_insn_r);
3592
3593 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3594 record_buf_mem);
3595 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3596 record_buf);
3597 return AARCH64_RECORD_SUCCESS;
3598}
3599
3600/* Record handler for data processing SIMD and floating point instructions. */
3601
3602static unsigned int
3603aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3604{
3605 uint8_t insn_bit21, opcode, rmode, reg_rd;
3606 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3607 uint8_t insn_bits11_14;
3608 uint32_t record_buf[2];
3609
3610 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3611 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3612 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3613 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3614 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3615 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3616 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3617 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3618 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3619
3620 if (record_debug)
3621 {
3622 fprintf_unfiltered (gdb_stdlog,
3623 "Process record: data processing SIMD/FP: ");
3624 }
3625
3626 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3627 {
3628 /* Floating point - fixed point conversion instructions. */
3629 if (!insn_bit21)
3630 {
3631 if (record_debug)
3632 fprintf_unfiltered (gdb_stdlog, "FP - fixed point conversion");
3633
3634 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3635 record_buf[0] = reg_rd;
3636 else
3637 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3638 }
3639 /* Floating point - conditional compare instructions. */
3640 else if (insn_bits10_11 == 0x01)
3641 {
3642 if (record_debug)
3643 fprintf_unfiltered (gdb_stdlog, "FP - conditional compare");
3644
3645 record_buf[0] = AARCH64_CPSR_REGNUM;
3646 }
3647 /* Floating point - data processing (2-source) and
3648 conditional select instructions. */
3649 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3650 {
3651 if (record_debug)
3652 fprintf_unfiltered (gdb_stdlog, "FP - DP (2-source)");
3653
3654 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3655 }
3656 else if (insn_bits10_11 == 0x00)
3657 {
3658 /* Floating point - immediate instructions. */
3659 if ((insn_bits12_15 & 0x01) == 0x01
3660 || (insn_bits12_15 & 0x07) == 0x04)
3661 {
3662 if (record_debug)
3663 fprintf_unfiltered (gdb_stdlog, "FP - immediate");
3664 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3665 }
3666 /* Floating point - compare instructions. */
3667 else if ((insn_bits12_15 & 0x03) == 0x02)
3668 {
3669 if (record_debug)
3670 fprintf_unfiltered (gdb_stdlog, "FP - immediate");
3671 record_buf[0] = AARCH64_CPSR_REGNUM;
3672 }
3673 /* Floating point - integer conversions instructions. */
f62fce35 3674 else if (insn_bits12_15 == 0x00)
99afc88b
OJ
3675 {
3676 /* Convert float to integer instruction. */
3677 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
3678 {
3679 if (record_debug)
3680 fprintf_unfiltered (gdb_stdlog, "float to int conversion");
3681
3682 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3683 }
3684 /* Convert integer to float instruction. */
3685 else if ((opcode >> 1) == 0x01 && !rmode)
3686 {
3687 if (record_debug)
3688 fprintf_unfiltered (gdb_stdlog, "int to float conversion");
3689
3690 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3691 }
3692 /* Move float to integer instruction. */
3693 else if ((opcode >> 1) == 0x03)
3694 {
3695 if (record_debug)
3696 fprintf_unfiltered (gdb_stdlog, "move float to int");
3697
3698 if (!(opcode & 0x01))
3699 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3700 else
3701 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3702 }
f62fce35
YQ
3703 else
3704 return AARCH64_RECORD_UNKNOWN;
99afc88b 3705 }
f62fce35
YQ
3706 else
3707 return AARCH64_RECORD_UNKNOWN;
99afc88b 3708 }
f62fce35
YQ
3709 else
3710 return AARCH64_RECORD_UNKNOWN;
99afc88b
OJ
3711 }
3712 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
3713 {
3714 if (record_debug)
3715 fprintf_unfiltered (gdb_stdlog, "SIMD copy");
3716
3717 /* Advanced SIMD copy instructions. */
3718 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
3719 && !bit (aarch64_insn_r->aarch64_insn, 15)
3720 && bit (aarch64_insn_r->aarch64_insn, 10))
3721 {
3722 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
3723 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3724 else
3725 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3726 }
3727 else
3728 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3729 }
3730 /* All remaining floating point or advanced SIMD instructions. */
3731 else
3732 {
3733 if (record_debug)
3734 fprintf_unfiltered (gdb_stdlog, "all remain");
3735
3736 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3737 }
3738
3739 if (record_debug)
3740 fprintf_unfiltered (gdb_stdlog, "\n");
3741
3742 aarch64_insn_r->reg_rec_count++;
3743 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
3744 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3745 record_buf);
3746 return AARCH64_RECORD_SUCCESS;
3747}
3748
3749/* Decodes insns type and invokes its record handler. */
3750
3751static unsigned int
3752aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
3753{
3754 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
3755
3756 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
3757 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
3758 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
3759 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3760
3761 /* Data processing - immediate instructions. */
3762 if (!ins_bit26 && !ins_bit27 && ins_bit28)
3763 return aarch64_record_data_proc_imm (aarch64_insn_r);
3764
3765 /* Branch, exception generation and system instructions. */
3766 if (ins_bit26 && !ins_bit27 && ins_bit28)
3767 return aarch64_record_branch_except_sys (aarch64_insn_r);
3768
3769 /* Load and store instructions. */
3770 if (!ins_bit25 && ins_bit27)
3771 return aarch64_record_load_store (aarch64_insn_r);
3772
3773 /* Data processing - register instructions. */
3774 if (ins_bit25 && !ins_bit26 && ins_bit27)
3775 return aarch64_record_data_proc_reg (aarch64_insn_r);
3776
3777 /* Data processing - SIMD and floating point instructions. */
3778 if (ins_bit25 && ins_bit26 && ins_bit27)
3779 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
3780
3781 return AARCH64_RECORD_UNSUPPORTED;
3782}
3783
3784/* Cleans up local record registers and memory allocations. */
3785
3786static void
3787deallocate_reg_mem (insn_decode_record *record)
3788{
3789 xfree (record->aarch64_regs);
3790 xfree (record->aarch64_mems);
3791}
3792
3793/* Parse the current instruction and record the values of the registers and
3794 memory that will be changed in current instruction to record_arch_list
3795 return -1 if something is wrong. */
3796
3797int
3798aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
3799 CORE_ADDR insn_addr)
3800{
3801 uint32_t rec_no = 0;
3802 uint8_t insn_size = 4;
3803 uint32_t ret = 0;
3804 ULONGEST t_bit = 0, insn_id = 0;
3805 gdb_byte buf[insn_size];
3806 insn_decode_record aarch64_record;
3807
3808 memset (&buf[0], 0, insn_size);
3809 memset (&aarch64_record, 0, sizeof (insn_decode_record));
3810 target_read_memory (insn_addr, &buf[0], insn_size);
3811 aarch64_record.aarch64_insn
3812 = (uint32_t) extract_unsigned_integer (&buf[0],
3813 insn_size,
3814 gdbarch_byte_order (gdbarch));
3815 aarch64_record.regcache = regcache;
3816 aarch64_record.this_addr = insn_addr;
3817 aarch64_record.gdbarch = gdbarch;
3818
3819 ret = aarch64_record_decode_insn_handler (&aarch64_record);
3820 if (ret == AARCH64_RECORD_UNSUPPORTED)
3821 {
3822 printf_unfiltered (_("Process record does not support instruction "
3823 "0x%0x at address %s.\n"),
3824 aarch64_record.aarch64_insn,
3825 paddress (gdbarch, insn_addr));
3826 ret = -1;
3827 }
3828
3829 if (0 == ret)
3830 {
3831 /* Record registers. */
3832 record_full_arch_list_add_reg (aarch64_record.regcache,
3833 AARCH64_PC_REGNUM);
3834 /* Always record register CPSR. */
3835 record_full_arch_list_add_reg (aarch64_record.regcache,
3836 AARCH64_CPSR_REGNUM);
3837 if (aarch64_record.aarch64_regs)
3838 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
3839 if (record_full_arch_list_add_reg (aarch64_record.regcache,
3840 aarch64_record.aarch64_regs[rec_no]))
3841 ret = -1;
3842
3843 /* Record memories. */
3844 if (aarch64_record.aarch64_mems)
3845 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
3846 if (record_full_arch_list_add_mem
3847 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
3848 aarch64_record.aarch64_mems[rec_no].len))
3849 ret = -1;
3850
3851 if (record_full_arch_list_add_end ())
3852 ret = -1;
3853 }
3854
3855 deallocate_reg_mem (&aarch64_record);
3856 return ret;
3857}