/* Source: gdb/arm-tdep.c from the binutils-gdb repository
   (commit: "Stop prologue analysis when past the epilogue").  */
1 /* Common target dependent code for GDB on ARM systems.
2
3 Copyright (C) 1988-2014 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21
22 #include <ctype.h> /* XXX for isupper (). */
23
24 #include "frame.h"
25 #include "inferior.h"
26 #include "infrun.h"
27 #include "gdbcmd.h"
28 #include "gdbcore.h"
29 #include <string.h>
30 #include "dis-asm.h" /* For register styles. */
31 #include "regcache.h"
32 #include "reggroups.h"
33 #include "doublest.h"
34 #include "value.h"
35 #include "arch-utils.h"
36 #include "osabi.h"
37 #include "frame-unwind.h"
38 #include "frame-base.h"
39 #include "trad-frame.h"
40 #include "objfiles.h"
41 #include "dwarf2-frame.h"
42 #include "gdbtypes.h"
43 #include "prologue-value.h"
44 #include "remote.h"
45 #include "target-descriptions.h"
46 #include "user-regs.h"
47 #include "observer.h"
48
49 #include "arm-tdep.h"
50 #include "gdb/sim-arm.h"
51
52 #include "elf-bfd.h"
53 #include "coff/internal.h"
54 #include "elf/arm.h"
55
56 #include "gdb_assert.h"
57 #include "vec.h"
58
59 #include "record.h"
60 #include "record-full.h"
61
62 #include "features/arm-with-m.c"
63 #include "features/arm-with-m-fpa-layout.c"
64 #include "features/arm-with-m-vfp-d16.c"
65 #include "features/arm-with-iwmmxt.c"
66 #include "features/arm-with-vfpv2.c"
67 #include "features/arm-with-vfpv3.c"
68 #include "features/arm-with-neon.c"
69
70 static int arm_debug;
71
72 /* Macros for setting and testing a bit in a minimal symbol that marks
73 it as Thumb function. The MSB of the minimal symbol's "info" field
74 is used for this purpose.
75
76 MSYMBOL_SET_SPECIAL Actually sets the "special" bit.
77 MSYMBOL_IS_SPECIAL Tests the "special" bit in a minimal symbol. */
78
79 #define MSYMBOL_SET_SPECIAL(msym) \
80 MSYMBOL_TARGET_FLAG_1 (msym) = 1
81
82 #define MSYMBOL_IS_SPECIAL(msym) \
83 MSYMBOL_TARGET_FLAG_1 (msym)
84
85 /* Per-objfile data used for mapping symbols. */
86 static const struct objfile_data *arm_objfile_data_key;
87
/* One ARM ELF mapping symbol ($a, $t or $d), reduced to its
   section-relative address and its class character (e.g. 't' marks the
   start of Thumb code; see arm_pc_is_thumb).  */

struct arm_mapping_symbol
{
  /* Offset of the mapping symbol from the start of its section.  */
  bfd_vma value;
  /* Mapping symbol class character; 't' means Thumb code.  */
  char type;
};
typedef struct arm_mapping_symbol arm_mapping_symbol_s;
DEF_VEC_O(arm_mapping_symbol_s);

/* Per-objfile attachment (see arm_objfile_data_key): for each BFD
   section, a vector of its mapping symbols, indexed by section index.
   The vectors are searched with VEC_lower_bound, so they are kept
   sorted by VALUE.  */

struct arm_per_objfile
{
  VEC(arm_mapping_symbol_s) **section_maps;
};
100
101 /* The list of available "set arm ..." and "show arm ..." commands. */
102 static struct cmd_list_element *setarmcmdlist = NULL;
103 static struct cmd_list_element *showarmcmdlist = NULL;
104
105 /* The type of floating-point to use. Keep this in sync with enum
106 arm_float_model, and the help string in _initialize_arm_tdep. */
107 static const char *const fp_model_strings[] =
108 {
109 "auto",
110 "softfpa",
111 "fpa",
112 "softvfp",
113 "vfp",
114 NULL
115 };
116
117 /* A variable that can be configured by the user. */
118 static enum arm_float_model arm_fp_model = ARM_FLOAT_AUTO;
119 static const char *current_fp_model = "auto";
120
121 /* The ABI to use. Keep this in sync with arm_abi_kind. */
122 static const char *const arm_abi_strings[] =
123 {
124 "auto",
125 "APCS",
126 "AAPCS",
127 NULL
128 };
129
130 /* A variable that can be configured by the user. */
131 static enum arm_abi_kind arm_abi_global = ARM_ABI_AUTO;
132 static const char *arm_abi_string = "auto";
133
134 /* The execution mode to assume. */
135 static const char *const arm_mode_strings[] =
136 {
137 "auto",
138 "arm",
139 "thumb",
140 NULL
141 };
142
143 static const char *arm_fallback_mode_string = "auto";
144 static const char *arm_force_mode_string = "auto";
145
146 /* Internal override of the execution mode. -1 means no override,
147 0 means override to ARM mode, 1 means override to Thumb mode.
148 The effect is the same as if arm_force_mode has been set by the
149 user (except the internal override has precedence over a user's
150 arm_force_mode override). */
151 static int arm_override_mode = -1;
152
153 /* Number of different reg name sets (options). */
154 static int num_disassembly_options;
155
156 /* The standard register names, and all the valid aliases for them. Note
157 that `fp', `sp' and `pc' are not added in this alias list, because they
158 have been added as builtin user registers in
159 std-regs.c:_initialize_frame_reg. */
160 static const struct
161 {
162 const char *name;
163 int regnum;
164 } arm_register_aliases[] = {
165 /* Basic register numbers. */
166 { "r0", 0 },
167 { "r1", 1 },
168 { "r2", 2 },
169 { "r3", 3 },
170 { "r4", 4 },
171 { "r5", 5 },
172 { "r6", 6 },
173 { "r7", 7 },
174 { "r8", 8 },
175 { "r9", 9 },
176 { "r10", 10 },
177 { "r11", 11 },
178 { "r12", 12 },
179 { "r13", 13 },
180 { "r14", 14 },
181 { "r15", 15 },
182 /* Synonyms (argument and variable registers). */
183 { "a1", 0 },
184 { "a2", 1 },
185 { "a3", 2 },
186 { "a4", 3 },
187 { "v1", 4 },
188 { "v2", 5 },
189 { "v3", 6 },
190 { "v4", 7 },
191 { "v5", 8 },
192 { "v6", 9 },
193 { "v7", 10 },
194 { "v8", 11 },
195 /* Other platform-specific names for r9. */
196 { "sb", 9 },
197 { "tr", 9 },
198 /* Special names. */
199 { "ip", 12 },
200 { "lr", 14 },
201 /* Names used by GCC (not listed in the ARM EABI). */
202 { "sl", 10 },
203 /* A special name from the older ATPCS. */
204 { "wr", 7 },
205 };
206
207 static const char *const arm_register_names[] =
208 {"r0", "r1", "r2", "r3", /* 0 1 2 3 */
209 "r4", "r5", "r6", "r7", /* 4 5 6 7 */
210 "r8", "r9", "r10", "r11", /* 8 9 10 11 */
211 "r12", "sp", "lr", "pc", /* 12 13 14 15 */
212 "f0", "f1", "f2", "f3", /* 16 17 18 19 */
213 "f4", "f5", "f6", "f7", /* 20 21 22 23 */
214 "fps", "cpsr" }; /* 24 25 */
215
216 /* Valid register name styles. */
217 static const char **valid_disassembly_styles;
218
219 /* Disassembly style to use. Default to "std" register names. */
220 static const char *disassembly_style;
221
222 /* This is used to keep the bfd arch_info in sync with the disassembly
223 style. */
224 static void set_disassembly_style_sfunc(char *, int,
225 struct cmd_list_element *);
226 static void set_disassembly_style (void);
227
228 static void convert_from_extended (const struct floatformat *, const void *,
229 void *, int);
230 static void convert_to_extended (const struct floatformat *, void *,
231 const void *, int);
232
233 static enum register_status arm_neon_quad_read (struct gdbarch *gdbarch,
234 struct regcache *regcache,
235 int regnum, gdb_byte *buf);
236 static void arm_neon_quad_write (struct gdbarch *gdbarch,
237 struct regcache *regcache,
238 int regnum, const gdb_byte *buf);
239
240 static int thumb_insn_size (unsigned short inst1);
241
/* Cached results of ARM/Thumb prologue analysis, shared by the
   prologue-based frame unwinders.  */

struct arm_prologue_cache
{
  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */

  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};
261
262 static CORE_ADDR arm_analyze_prologue (struct gdbarch *gdbarch,
263 CORE_ADDR prologue_start,
264 CORE_ADDR prologue_end,
265 struct arm_prologue_cache *cache);
266
267 /* Architecture version for displaced stepping. This effects the behaviour of
268 certain instructions, and really should not be hard-wired. */
269
270 #define DISPLACED_STEPPING_ARCH_VERSION 5
271
272 /* Addresses for calling Thumb functions have the bit 0 set.
273 Here are some macros to test, set, or clear bit 0 of addresses. */
274 #define IS_THUMB_ADDR(addr) ((addr) & 1)
275 #define MAKE_THUMB_ADDR(addr) ((addr) | 1)
276 #define UNMAKE_THUMB_ADDR(addr) ((addr) & ~1)
277
278 /* Set to true if the 32-bit mode is in use. */
279
280 int arm_apcs_32 = 1;
281
282 /* Return the bit mask in ARM_PS_REGNUM that indicates Thumb mode. */
283
284 int
285 arm_psr_thumb_bit (struct gdbarch *gdbarch)
286 {
287 if (gdbarch_tdep (gdbarch)->is_m)
288 return XPSR_T;
289 else
290 return CPSR_T;
291 }
292
293 /* Determine if FRAME is executing in Thumb mode. */
294
295 int
296 arm_frame_is_thumb (struct frame_info *frame)
297 {
298 CORE_ADDR cpsr;
299 ULONGEST t_bit = arm_psr_thumb_bit (get_frame_arch (frame));
300
301 /* Every ARM frame unwinder can unwind the T bit of the CPSR, either
302 directly (from a signal frame or dummy frame) or by interpreting
303 the saved LR (from a prologue or DWARF frame). So consult it and
304 trust the unwinders. */
305 cpsr = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
306
307 return (cpsr & t_bit) != 0;
308 }
309
310 /* Callback for VEC_lower_bound. */
311
312 static inline int
313 arm_compare_mapping_symbols (const struct arm_mapping_symbol *lhs,
314 const struct arm_mapping_symbol *rhs)
315 {
316 return lhs->value < rhs->value;
317 }
318
319 /* Search for the mapping symbol covering MEMADDR. If one is found,
320 return its type. Otherwise, return 0. If START is non-NULL,
321 set *START to the location of the mapping symbol. */
322
static char
arm_find_mapping_symbol (CORE_ADDR memaddr, CORE_ADDR *start)
{
  struct obj_section *sec;

  /* If there are mapping symbols, consult them.  */
  sec = find_pc_section (memaddr);
  if (sec != NULL)
    {
      struct arm_per_objfile *data;
      VEC(arm_mapping_symbol_s) *map;
      /* Mapping symbol values are section-relative, so rebase MEMADDR
	 before searching.  */
      struct arm_mapping_symbol map_key = { memaddr - obj_section_addr (sec),
					    0 };
      unsigned int idx;

      data = objfile_data (sec->objfile, arm_objfile_data_key);
      if (data != NULL)
	{
	  /* One sorted vector of mapping symbols per BFD section.  */
	  map = data->section_maps[sec->the_bfd_section->index];
	  if (!VEC_empty (arm_mapping_symbol_s, map))
	    {
	      struct arm_mapping_symbol *map_sym;

	      idx = VEC_lower_bound (arm_mapping_symbol_s, map, &map_key,
				     arm_compare_mapping_symbols);

	      /* VEC_lower_bound finds the earliest ordered insertion
		 point.  If the following symbol starts at this exact
		 address, we use that; otherwise, the preceding
		 mapping symbol covers this address.  */
	      if (idx < VEC_length (arm_mapping_symbol_s, map))
		{
		  map_sym = VEC_index (arm_mapping_symbol_s, map, idx);
		  if (map_sym->value == map_key.value)
		    {
		      if (start)
			*start = map_sym->value + obj_section_addr (sec);
		      return map_sym->type;
		    }
		}

	      if (idx > 0)
		{
		  /* The symbol just before MEMADDR; its type stays in
		     effect up to the next mapping symbol.  */
		  map_sym = VEC_index (arm_mapping_symbol_s, map, idx - 1);
		  if (start)
		    *start = map_sym->value + obj_section_addr (sec);
		  return map_sym->type;
		}
	    }
	}
    }

  /* No mapping symbol covers MEMADDR.  */
  return 0;
}
377
378 /* Determine if the program counter specified in MEMADDR is in a Thumb
379 function. This function should be called for addresses unrelated to
380 any executing frame; otherwise, prefer arm_frame_is_thumb. */
381
382 int
383 arm_pc_is_thumb (struct gdbarch *gdbarch, CORE_ADDR memaddr)
384 {
385 struct bound_minimal_symbol sym;
386 char type;
387 struct displaced_step_closure* dsc
388 = get_displaced_step_closure_by_addr(memaddr);
389
390 /* If checking the mode of displaced instruction in copy area, the mode
391 should be determined by instruction on the original address. */
392 if (dsc)
393 {
394 if (debug_displaced)
395 fprintf_unfiltered (gdb_stdlog,
396 "displaced: check mode of %.8lx instead of %.8lx\n",
397 (unsigned long) dsc->insn_addr,
398 (unsigned long) memaddr);
399 memaddr = dsc->insn_addr;
400 }
401
402 /* If bit 0 of the address is set, assume this is a Thumb address. */
403 if (IS_THUMB_ADDR (memaddr))
404 return 1;
405
406 /* Respect internal mode override if active. */
407 if (arm_override_mode != -1)
408 return arm_override_mode;
409
410 /* If the user wants to override the symbol table, let him. */
411 if (strcmp (arm_force_mode_string, "arm") == 0)
412 return 0;
413 if (strcmp (arm_force_mode_string, "thumb") == 0)
414 return 1;
415
416 /* ARM v6-M and v7-M are always in Thumb mode. */
417 if (gdbarch_tdep (gdbarch)->is_m)
418 return 1;
419
420 /* If there are mapping symbols, consult them. */
421 type = arm_find_mapping_symbol (memaddr, NULL);
422 if (type)
423 return type == 't';
424
425 /* Thumb functions have a "special" bit set in minimal symbols. */
426 sym = lookup_minimal_symbol_by_pc (memaddr);
427 if (sym.minsym)
428 return (MSYMBOL_IS_SPECIAL (sym.minsym));
429
430 /* If the user wants to override the fallback mode, let them. */
431 if (strcmp (arm_fallback_mode_string, "arm") == 0)
432 return 0;
433 if (strcmp (arm_fallback_mode_string, "thumb") == 0)
434 return 1;
435
436 /* If we couldn't find any symbol, but we're talking to a running
437 target, then trust the current value of $cpsr. This lets
438 "display/i $pc" always show the correct mode (though if there is
439 a symbol table we will not reach here, so it still may not be
440 displayed in the mode it will be executed). */
441 if (target_has_registers)
442 return arm_frame_is_thumb (get_current_frame ());
443
444 /* Otherwise we're out of luck; we assume ARM. */
445 return 0;
446 }
447
448 /* Remove useless bits from addresses in a running program. */
449 static CORE_ADDR
450 arm_addr_bits_remove (struct gdbarch *gdbarch, CORE_ADDR val)
451 {
452 /* On M-profile devices, do not strip the low bit from EXC_RETURN
453 (the magic exception return address). */
454 if (gdbarch_tdep (gdbarch)->is_m
455 && (val & 0xfffffff0) == 0xfffffff0)
456 return val;
457
458 if (arm_apcs_32)
459 return UNMAKE_THUMB_ADDR (val);
460 else
461 return (val & 0x03fffffc);
462 }
463
464 /* Return 1 if PC is the start of a compiler helper function which
465 can be safely ignored during prologue skipping. IS_THUMB is true
466 if the function is known to be a Thumb function due to the way it
467 is being called. */
468 static int
469 skip_prologue_function (struct gdbarch *gdbarch, CORE_ADDR pc, int is_thumb)
470 {
471 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
472 struct bound_minimal_symbol msym;
473
474 msym = lookup_minimal_symbol_by_pc (pc);
475 if (msym.minsym != NULL
476 && BMSYMBOL_VALUE_ADDRESS (msym) == pc
477 && MSYMBOL_LINKAGE_NAME (msym.minsym) != NULL)
478 {
479 const char *name = MSYMBOL_LINKAGE_NAME (msym.minsym);
480
481 /* The GNU linker's Thumb call stub to foo is named
482 __foo_from_thumb. */
483 if (strstr (name, "_from_thumb") != NULL)
484 name += 2;
485
486 /* On soft-float targets, __truncdfsf2 is called to convert promoted
487 arguments to their argument types in non-prototyped
488 functions. */
489 if (strncmp (name, "__truncdfsf2", strlen ("__truncdfsf2")) == 0)
490 return 1;
491 if (strncmp (name, "__aeabi_d2f", strlen ("__aeabi_d2f")) == 0)
492 return 1;
493
494 /* Internal functions related to thread-local storage. */
495 if (strncmp (name, "__tls_get_addr", strlen ("__tls_get_addr")) == 0)
496 return 1;
497 if (strncmp (name, "__aeabi_read_tp", strlen ("__aeabi_read_tp")) == 0)
498 return 1;
499 }
500 else
501 {
502 /* If we run against a stripped glibc, we may be unable to identify
503 special functions by name. Check for one important case,
504 __aeabi_read_tp, by comparing the *code* against the default
505 implementation (this is hand-written ARM assembler in glibc). */
506
507 if (!is_thumb
508 && read_memory_unsigned_integer (pc, 4, byte_order_for_code)
509 == 0xe3e00a0f /* mov r0, #0xffff0fff */
510 && read_memory_unsigned_integer (pc + 4, 4, byte_order_for_code)
511 == 0xe240f01f) /* sub pc, r0, #31 */
512 return 1;
513 }
514
515 return 0;
516 }
517
/* Support routines for instruction parsing.  */

/* A mask with bits 0 through X (inclusive) set.  */
#define submask(x) ((1L << ((x) + 1)) - 1)
/* Bit number ST of OBJ.  */
#define bit(obj,st) (((obj) >> (st)) & 1)
/* The bit field of OBJ from bit ST through bit FN, inclusive.  */
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
/* Like bits, but sign-extended: bit FN is replicated into the high
   bits of the result.  */
#define sbits(obj,st,fn) \
  ((long) (bits(obj,st,fn) | ((long) bit(obj,fn) * ~ submask (fn - st))))
/* Destination of an ARM-mode branch INSTR at ADDR: the PC reads as
   ADDR + 8, plus the sign-extended 24-bit offset scaled by 4.  */
#define BranchDest(addr,instr) \
  ((CORE_ADDR) (((unsigned long) (addr)) + 8 + (sbits (instr, 0, 23) << 2)))

/* Extract the immediate from instruction movw/movt of encoding T.  INSN1 is
   the first 16-bit of instruction, and INSN2 is the second 16-bit of
   instruction.  */
#define EXTRACT_MOVW_MOVT_IMM_T(insn1, insn2) \
  ((bits ((insn1), 0, 3) << 12)		\
   | (bits ((insn1), 10, 10) << 11)	\
   | (bits ((insn2), 12, 14) << 8)	\
   | bits ((insn2), 0, 7))

/* Extract the immediate from instruction movw/movt of encoding A.  INSN is
   the 32-bit instruction.  */
#define EXTRACT_MOVW_MOVT_IMM_A(insn) \
  ((bits ((insn), 16, 19) << 12)	\
   | bits ((insn), 0, 11))
541
542 /* Decode immediate value; implements ThumbExpandImmediate pseudo-op. */
543
/* Decode immediate value; implements ThumbExpandImmediate pseudo-op.  */

static unsigned int
thumb_expand_immediate (unsigned int imm)
{
  unsigned int rot = imm >> 7;		/* Top five bits: rotation count.  */
  unsigned int byte = imm & 0xff;	/* Low eight bits: base byte.  */

  /* Rotation counts of 8 and up encode a single 0b1xxxxxxx byte
     rotated into place.  */
  if (rot >= 8)
    return (0x80 | (imm & 0x7f)) << (32 - rot);

  /* Counts below 8 select one of four replication patterns.  */
  switch (rot >> 1)
    {
    case 0:			/* 00000000 00000000 00000000 abcdefgh */
      return byte;
    case 1:			/* 00000000 abcdefgh 00000000 abcdefgh */
      return byte | (byte << 16);
    case 2:			/* abcdefgh 00000000 abcdefgh 00000000 */
      return (byte << 8) | (byte << 24);
    default:			/* abcdefgh abcdefgh abcdefgh abcdefgh */
      return byte | (byte << 8) | (byte << 16) | (byte << 24);
    }
}
565
566 /* Return 1 if the 16-bit Thumb instruction INST might change
567 control flow, 0 otherwise. */
568
static int
thumb_instruction_changes_pc (unsigned short inst)
{
  /* An instruction may change control flow if it is a pop including
     PC, either branch form, BX/BLX, an explicit move to PC, or
     CBZ/CBNZ.  */
  return ((inst & 0xff00) == 0xbd00	/* pop {rlist, pc} */
	  || (inst & 0xf000) == 0xd000	/* conditional branch */
	  || (inst & 0xf800) == 0xe000	/* unconditional branch */
	  || (inst & 0xff00) == 0x4700	/* bx REG, blx REG */
	  || (inst & 0xff87) == 0x4687	/* mov pc, REG */
	  || (inst & 0xf500) == 0xb100);	/* CBNZ or CBZ.  */
}
592
593 /* Return 1 if the 32-bit Thumb instruction in INST1 and INST2
594 might change control flow, 0 otherwise. */
595
static int
thumb2_instruction_changes_pc (unsigned short inst1, unsigned short inst2)
{
  if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
    {
      /* Branches and miscellaneous control instructions.  */

      if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
	{
	  /* B, BL, BLX.  */
	  return 1;
	}
      else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
	{
	  /* SUBS PC, LR, #imm8.  */
	  return 1;
	}
      else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
	{
	  /* Conditional branch.  (Condition field 0b111x encodes other
	     instructions, so it is excluded here.)  */
	  return 1;
	}

      /* Other miscellaneous control instructions do not write PC.  */
      return 0;
    }

  if ((inst1 & 0xfe50) == 0xe810)
    {
      /* Load multiple or RFE.  Bits 7 and 8 of INST1 select the
	 addressing mode; an LDM form changes PC only when PC (bit 15
	 of the register list in INST2) is loaded, while RFE always
	 does.  */

      if (bit (inst1, 7) && !bit (inst1, 8))
	{
	  /* LDMIA or POP */
	  if (bit (inst2, 15))
	    return 1;
	}
      else if (!bit (inst1, 7) && bit (inst1, 8))
	{
	  /* LDMDB */
	  if (bit (inst2, 15))
	    return 1;
	}
      else if (bit (inst1, 7) && bit (inst1, 8))
	{
	  /* RFEIA */
	  return 1;
	}
      else if (!bit (inst1, 7) && !bit (inst1, 8))
	{
	  /* RFEDB */
	  return 1;
	}

      return 0;
    }

  if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
    {
      /* MOV PC or MOVS PC.  */
      return 1;
    }

  if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
    {
      /* LDR PC.  Treat all addressing forms that target PC as changing
	 control flow.  */
      if (bits (inst1, 0, 3) == 15)
	return 1;
      if (bit (inst1, 7))
	return 1;
      if (bit (inst2, 11))
	return 1;
      if ((inst2 & 0x0fc0) == 0x0000)
	return 1;

      return 0;
    }

  if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
    {
      /* TBB.  */
      return 1;
    }

  if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
    {
      /* TBH.  */
      return 1;
    }

  /* Anything else leaves the PC alone.  */
  return 0;
}
687
688 /* Return 1 if the 16-bit Thumb instruction INSN restores SP in
689 epilogue, 0 otherwise. */
690
static int
thumb_instruction_restores_sp (unsigned short insn)
{
  /* These three forms are how a Thumb epilogue tears down the frame.  */
  if (insn == 0x46bd)			/* mov sp, r7 */
    return 1;
  if ((insn & 0xff80) == 0xb000)	/* add sp, imm */
    return 1;
  if ((insn & 0xfe00) == 0xbc00)	/* pop <registers> */
    return 1;

  return 0;
}
698
699 /* Analyze a Thumb prologue, looking for a recognizable stack frame
700 and frame pointer. Scan until we encounter a store that could
701 clobber the stack frame unexpectedly, or an unknown instruction.
702 Return the last address which is definitely safe to skip for an
703 initial breakpoint. */
704
705 static CORE_ADDR
706 thumb_analyze_prologue (struct gdbarch *gdbarch,
707 CORE_ADDR start, CORE_ADDR limit,
708 struct arm_prologue_cache *cache)
709 {
710 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
711 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
712 int i;
713 pv_t regs[16];
714 struct pv_area *stack;
715 struct cleanup *back_to;
716 CORE_ADDR offset;
717 CORE_ADDR unrecognized_pc = 0;
718
719 for (i = 0; i < 16; i++)
720 regs[i] = pv_register (i, 0);
721 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
722 back_to = make_cleanup_free_pv_area (stack);
723
724 while (start < limit)
725 {
726 unsigned short insn;
727
728 insn = read_memory_unsigned_integer (start, 2, byte_order_for_code);
729
730 if ((insn & 0xfe00) == 0xb400) /* push { rlist } */
731 {
732 int regno;
733 int mask;
734
735 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
736 break;
737
738 /* Bits 0-7 contain a mask for registers R0-R7. Bit 8 says
739 whether to save LR (R14). */
740 mask = (insn & 0xff) | ((insn & 0x100) << 6);
741
742 /* Calculate offsets of saved R0-R7 and LR. */
743 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
744 if (mask & (1 << regno))
745 {
746 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
747 -4);
748 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
749 }
750 }
751 else if ((insn & 0xff80) == 0xb080) /* sub sp, #imm */
752 {
753 offset = (insn & 0x7f) << 2; /* get scaled offset */
754 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
755 -offset);
756 }
757 else if (thumb_instruction_restores_sp (insn))
758 {
759 /* Don't scan past the epilogue. */
760 break;
761 }
762 else if ((insn & 0xf800) == 0xa800) /* add Rd, sp, #imm */
763 regs[bits (insn, 8, 10)] = pv_add_constant (regs[ARM_SP_REGNUM],
764 (insn & 0xff) << 2);
765 else if ((insn & 0xfe00) == 0x1c00 /* add Rd, Rn, #imm */
766 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
767 regs[bits (insn, 0, 2)] = pv_add_constant (regs[bits (insn, 3, 5)],
768 bits (insn, 6, 8));
769 else if ((insn & 0xf800) == 0x3000 /* add Rd, #imm */
770 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
771 regs[bits (insn, 8, 10)] = pv_add_constant (regs[bits (insn, 8, 10)],
772 bits (insn, 0, 7));
773 else if ((insn & 0xfe00) == 0x1800 /* add Rd, Rn, Rm */
774 && pv_is_register (regs[bits (insn, 6, 8)], ARM_SP_REGNUM)
775 && pv_is_constant (regs[bits (insn, 3, 5)]))
776 regs[bits (insn, 0, 2)] = pv_add (regs[bits (insn, 3, 5)],
777 regs[bits (insn, 6, 8)]);
778 else if ((insn & 0xff00) == 0x4400 /* add Rd, Rm */
779 && pv_is_constant (regs[bits (insn, 3, 6)]))
780 {
781 int rd = (bit (insn, 7) << 3) + bits (insn, 0, 2);
782 int rm = bits (insn, 3, 6);
783 regs[rd] = pv_add (regs[rd], regs[rm]);
784 }
785 else if ((insn & 0xff00) == 0x4600) /* mov hi, lo or mov lo, hi */
786 {
787 int dst_reg = (insn & 0x7) + ((insn & 0x80) >> 4);
788 int src_reg = (insn & 0x78) >> 3;
789 regs[dst_reg] = regs[src_reg];
790 }
791 else if ((insn & 0xf800) == 0x9000) /* str rd, [sp, #off] */
792 {
793 /* Handle stores to the stack. Normally pushes are used,
794 but with GCC -mtpcs-frame, there may be other stores
795 in the prologue to create the frame. */
796 int regno = (insn >> 8) & 0x7;
797 pv_t addr;
798
799 offset = (insn & 0xff) << 2;
800 addr = pv_add_constant (regs[ARM_SP_REGNUM], offset);
801
802 if (pv_area_store_would_trash (stack, addr))
803 break;
804
805 pv_area_store (stack, addr, 4, regs[regno]);
806 }
807 else if ((insn & 0xf800) == 0x6000) /* str rd, [rn, #off] */
808 {
809 int rd = bits (insn, 0, 2);
810 int rn = bits (insn, 3, 5);
811 pv_t addr;
812
813 offset = bits (insn, 6, 10) << 2;
814 addr = pv_add_constant (regs[rn], offset);
815
816 if (pv_area_store_would_trash (stack, addr))
817 break;
818
819 pv_area_store (stack, addr, 4, regs[rd]);
820 }
821 else if (((insn & 0xf800) == 0x7000 /* strb Rd, [Rn, #off] */
822 || (insn & 0xf800) == 0x8000) /* strh Rd, [Rn, #off] */
823 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
824 /* Ignore stores of argument registers to the stack. */
825 ;
826 else if ((insn & 0xf800) == 0xc800 /* ldmia Rn!, { registers } */
827 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
828 /* Ignore block loads from the stack, potentially copying
829 parameters from memory. */
830 ;
831 else if ((insn & 0xf800) == 0x9800 /* ldr Rd, [Rn, #immed] */
832 || ((insn & 0xf800) == 0x6800 /* ldr Rd, [sp, #immed] */
833 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM)))
834 /* Similarly ignore single loads from the stack. */
835 ;
836 else if ((insn & 0xffc0) == 0x0000 /* lsls Rd, Rm, #0 */
837 || (insn & 0xffc0) == 0x1c00) /* add Rd, Rn, #0 */
838 /* Skip register copies, i.e. saves to another register
839 instead of the stack. */
840 ;
841 else if ((insn & 0xf800) == 0x2000) /* movs Rd, #imm */
842 /* Recognize constant loads; even with small stacks these are necessary
843 on Thumb. */
844 regs[bits (insn, 8, 10)] = pv_constant (bits (insn, 0, 7));
845 else if ((insn & 0xf800) == 0x4800) /* ldr Rd, [pc, #imm] */
846 {
847 /* Constant pool loads, for the same reason. */
848 unsigned int constant;
849 CORE_ADDR loc;
850
851 loc = start + 4 + bits (insn, 0, 7) * 4;
852 constant = read_memory_unsigned_integer (loc, 4, byte_order);
853 regs[bits (insn, 8, 10)] = pv_constant (constant);
854 }
855 else if (thumb_insn_size (insn) == 4) /* 32-bit Thumb-2 instructions. */
856 {
857 unsigned short inst2;
858
859 inst2 = read_memory_unsigned_integer (start + 2, 2,
860 byte_order_for_code);
861
862 if ((insn & 0xf800) == 0xf000 && (inst2 & 0xe800) == 0xe800)
863 {
864 /* BL, BLX. Allow some special function calls when
865 skipping the prologue; GCC generates these before
866 storing arguments to the stack. */
867 CORE_ADDR nextpc;
868 int j1, j2, imm1, imm2;
869
870 imm1 = sbits (insn, 0, 10);
871 imm2 = bits (inst2, 0, 10);
872 j1 = bit (inst2, 13);
873 j2 = bit (inst2, 11);
874
875 offset = ((imm1 << 12) + (imm2 << 1));
876 offset ^= ((!j2) << 22) | ((!j1) << 23);
877
878 nextpc = start + 4 + offset;
879 /* For BLX make sure to clear the low bits. */
880 if (bit (inst2, 12) == 0)
881 nextpc = nextpc & 0xfffffffc;
882
883 if (!skip_prologue_function (gdbarch, nextpc,
884 bit (inst2, 12) != 0))
885 break;
886 }
887
888 else if ((insn & 0xffd0) == 0xe900 /* stmdb Rn{!},
889 { registers } */
890 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
891 {
892 pv_t addr = regs[bits (insn, 0, 3)];
893 int regno;
894
895 if (pv_area_store_would_trash (stack, addr))
896 break;
897
898 /* Calculate offsets of saved registers. */
899 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
900 if (inst2 & (1 << regno))
901 {
902 addr = pv_add_constant (addr, -4);
903 pv_area_store (stack, addr, 4, regs[regno]);
904 }
905
906 if (insn & 0x0020)
907 regs[bits (insn, 0, 3)] = addr;
908 }
909
910 else if ((insn & 0xff50) == 0xe940 /* strd Rt, Rt2,
911 [Rn, #+/-imm]{!} */
912 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
913 {
914 int regno1 = bits (inst2, 12, 15);
915 int regno2 = bits (inst2, 8, 11);
916 pv_t addr = regs[bits (insn, 0, 3)];
917
918 offset = inst2 & 0xff;
919 if (insn & 0x0080)
920 addr = pv_add_constant (addr, offset);
921 else
922 addr = pv_add_constant (addr, -offset);
923
924 if (pv_area_store_would_trash (stack, addr))
925 break;
926
927 pv_area_store (stack, addr, 4, regs[regno1]);
928 pv_area_store (stack, pv_add_constant (addr, 4),
929 4, regs[regno2]);
930
931 if (insn & 0x0020)
932 regs[bits (insn, 0, 3)] = addr;
933 }
934
935 else if ((insn & 0xfff0) == 0xf8c0 /* str Rt,[Rn,+/-#imm]{!} */
936 && (inst2 & 0x0c00) == 0x0c00
937 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
938 {
939 int regno = bits (inst2, 12, 15);
940 pv_t addr = regs[bits (insn, 0, 3)];
941
942 offset = inst2 & 0xff;
943 if (inst2 & 0x0200)
944 addr = pv_add_constant (addr, offset);
945 else
946 addr = pv_add_constant (addr, -offset);
947
948 if (pv_area_store_would_trash (stack, addr))
949 break;
950
951 pv_area_store (stack, addr, 4, regs[regno]);
952
953 if (inst2 & 0x0100)
954 regs[bits (insn, 0, 3)] = addr;
955 }
956
957 else if ((insn & 0xfff0) == 0xf8c0 /* str.w Rt,[Rn,#imm] */
958 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
959 {
960 int regno = bits (inst2, 12, 15);
961 pv_t addr;
962
963 offset = inst2 & 0xfff;
964 addr = pv_add_constant (regs[bits (insn, 0, 3)], offset);
965
966 if (pv_area_store_would_trash (stack, addr))
967 break;
968
969 pv_area_store (stack, addr, 4, regs[regno]);
970 }
971
972 else if ((insn & 0xffd0) == 0xf880 /* str{bh}.w Rt,[Rn,#imm] */
973 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
974 /* Ignore stores of argument registers to the stack. */
975 ;
976
977 else if ((insn & 0xffd0) == 0xf800 /* str{bh} Rt,[Rn,#+/-imm] */
978 && (inst2 & 0x0d00) == 0x0c00
979 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
980 /* Ignore stores of argument registers to the stack. */
981 ;
982
983 else if ((insn & 0xffd0) == 0xe890 /* ldmia Rn[!],
984 { registers } */
985 && (inst2 & 0x8000) == 0x0000
986 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
987 /* Ignore block loads from the stack, potentially copying
988 parameters from memory. */
989 ;
990
991 else if ((insn & 0xffb0) == 0xe950 /* ldrd Rt, Rt2,
992 [Rn, #+/-imm] */
993 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
994 /* Similarly ignore dual loads from the stack. */
995 ;
996
997 else if ((insn & 0xfff0) == 0xf850 /* ldr Rt,[Rn,#+/-imm] */
998 && (inst2 & 0x0d00) == 0x0c00
999 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
1000 /* Similarly ignore single loads from the stack. */
1001 ;
1002
1003 else if ((insn & 0xfff0) == 0xf8d0 /* ldr.w Rt,[Rn,#imm] */
1004 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
1005 /* Similarly ignore single loads from the stack. */
1006 ;
1007
1008 else if ((insn & 0xfbf0) == 0xf100 /* add.w Rd, Rn, #imm */
1009 && (inst2 & 0x8000) == 0x0000)
1010 {
1011 unsigned int imm = ((bits (insn, 10, 10) << 11)
1012 | (bits (inst2, 12, 14) << 8)
1013 | bits (inst2, 0, 7));
1014
1015 regs[bits (inst2, 8, 11)]
1016 = pv_add_constant (regs[bits (insn, 0, 3)],
1017 thumb_expand_immediate (imm));
1018 }
1019
1020 else if ((insn & 0xfbf0) == 0xf200 /* addw Rd, Rn, #imm */
1021 && (inst2 & 0x8000) == 0x0000)
1022 {
1023 unsigned int imm = ((bits (insn, 10, 10) << 11)
1024 | (bits (inst2, 12, 14) << 8)
1025 | bits (inst2, 0, 7));
1026
1027 regs[bits (inst2, 8, 11)]
1028 = pv_add_constant (regs[bits (insn, 0, 3)], imm);
1029 }
1030
1031 else if ((insn & 0xfbf0) == 0xf1a0 /* sub.w Rd, Rn, #imm */
1032 && (inst2 & 0x8000) == 0x0000)
1033 {
1034 unsigned int imm = ((bits (insn, 10, 10) << 11)
1035 | (bits (inst2, 12, 14) << 8)
1036 | bits (inst2, 0, 7));
1037
1038 regs[bits (inst2, 8, 11)]
1039 = pv_add_constant (regs[bits (insn, 0, 3)],
1040 - (CORE_ADDR) thumb_expand_immediate (imm));
1041 }
1042
1043 else if ((insn & 0xfbf0) == 0xf2a0 /* subw Rd, Rn, #imm */
1044 && (inst2 & 0x8000) == 0x0000)
1045 {
1046 unsigned int imm = ((bits (insn, 10, 10) << 11)
1047 | (bits (inst2, 12, 14) << 8)
1048 | bits (inst2, 0, 7));
1049
1050 regs[bits (inst2, 8, 11)]
1051 = pv_add_constant (regs[bits (insn, 0, 3)], - (CORE_ADDR) imm);
1052 }
1053
1054 else if ((insn & 0xfbff) == 0xf04f) /* mov.w Rd, #const */
1055 {
1056 unsigned int imm = ((bits (insn, 10, 10) << 11)
1057 | (bits (inst2, 12, 14) << 8)
1058 | bits (inst2, 0, 7));
1059
1060 regs[bits (inst2, 8, 11)]
1061 = pv_constant (thumb_expand_immediate (imm));
1062 }
1063
1064 else if ((insn & 0xfbf0) == 0xf240) /* movw Rd, #const */
1065 {
1066 unsigned int imm
1067 = EXTRACT_MOVW_MOVT_IMM_T (insn, inst2);
1068
1069 regs[bits (inst2, 8, 11)] = pv_constant (imm);
1070 }
1071
1072 else if (insn == 0xea5f /* mov.w Rd,Rm */
1073 && (inst2 & 0xf0f0) == 0)
1074 {
1075 int dst_reg = (inst2 & 0x0f00) >> 8;
1076 int src_reg = inst2 & 0xf;
1077 regs[dst_reg] = regs[src_reg];
1078 }
1079
1080 else if ((insn & 0xff7f) == 0xf85f) /* ldr.w Rt,<label> */
1081 {
1082 /* Constant pool loads. */
1083 unsigned int constant;
1084 CORE_ADDR loc;
1085
1086 offset = bits (inst2, 0, 11);
1087 if (insn & 0x0080)
1088 loc = start + 4 + offset;
1089 else
1090 loc = start + 4 - offset;
1091
1092 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1093 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1094 }
1095
1096 else if ((insn & 0xff7f) == 0xe95f) /* ldrd Rt,Rt2,<label> */
1097 {
1098 /* Constant pool loads. */
1099 unsigned int constant;
1100 CORE_ADDR loc;
1101
1102 offset = bits (inst2, 0, 7) << 2;
1103 if (insn & 0x0080)
1104 loc = start + 4 + offset;
1105 else
1106 loc = start + 4 - offset;
1107
1108 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1109 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1110
1111 constant = read_memory_unsigned_integer (loc + 4, 4, byte_order);
1112 regs[bits (inst2, 8, 11)] = pv_constant (constant);
1113 }
1114
1115 else if (thumb2_instruction_changes_pc (insn, inst2))
1116 {
1117 /* Don't scan past anything that might change control flow. */
1118 break;
1119 }
1120 else
1121 {
1122 /* The optimizer might shove anything into the prologue,
1123 so we just skip what we don't recognize. */
1124 unrecognized_pc = start;
1125 }
1126
1127 start += 2;
1128 }
1129 else if (thumb_instruction_changes_pc (insn))
1130 {
1131 /* Don't scan past anything that might change control flow. */
1132 break;
1133 }
1134 else
1135 {
1136 /* The optimizer might shove anything into the prologue,
1137 so we just skip what we don't recognize. */
1138 unrecognized_pc = start;
1139 }
1140
1141 start += 2;
1142 }
1143
1144 if (arm_debug)
1145 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1146 paddress (gdbarch, start));
1147
1148 if (unrecognized_pc == 0)
1149 unrecognized_pc = start;
1150
1151 if (cache == NULL)
1152 {
1153 do_cleanups (back_to);
1154 return unrecognized_pc;
1155 }
1156
1157 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1158 {
1159 /* Frame pointer is fp. Frame size is constant. */
1160 cache->framereg = ARM_FP_REGNUM;
1161 cache->framesize = -regs[ARM_FP_REGNUM].k;
1162 }
1163 else if (pv_is_register (regs[THUMB_FP_REGNUM], ARM_SP_REGNUM))
1164 {
1165 /* Frame pointer is r7. Frame size is constant. */
1166 cache->framereg = THUMB_FP_REGNUM;
1167 cache->framesize = -regs[THUMB_FP_REGNUM].k;
1168 }
1169 else
1170 {
1171 /* Try the stack pointer... this is a bit desperate. */
1172 cache->framereg = ARM_SP_REGNUM;
1173 cache->framesize = -regs[ARM_SP_REGNUM].k;
1174 }
1175
1176 for (i = 0; i < 16; i++)
1177 if (pv_area_find_reg (stack, gdbarch, i, &offset))
1178 cache->saved_regs[i].addr = offset;
1179
1180 do_cleanups (back_to);
1181 return unrecognized_pc;
1182 }
1183
1184
1185 /* Try to analyze the instructions starting from PC, which load symbol
1186 __stack_chk_guard. Return the address of instruction after loading this
1187 symbol, set the dest register number to *BASEREG, and set the size of
1188 instructions for loading symbol in OFFSET. Return 0 if instructions are
1189 not recognized. */
1190
1191 static CORE_ADDR
1192 arm_analyze_load_stack_chk_guard(CORE_ADDR pc, struct gdbarch *gdbarch,
1193 unsigned int *destreg, int *offset)
1194 {
1195 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1196 int is_thumb = arm_pc_is_thumb (gdbarch, pc);
1197 unsigned int low, high, address;
1198
1199 address = 0;
1200 if (is_thumb)
1201 {
1202 unsigned short insn1
1203 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
1204
1205 if ((insn1 & 0xf800) == 0x4800) /* ldr Rd, #immed */
1206 {
1207 *destreg = bits (insn1, 8, 10);
1208 *offset = 2;
1209 address = bits (insn1, 0, 7);
1210 }
1211 else if ((insn1 & 0xfbf0) == 0xf240) /* movw Rd, #const */
1212 {
1213 unsigned short insn2
1214 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);
1215
1216 low = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);
1217
1218 insn1
1219 = read_memory_unsigned_integer (pc + 4, 2, byte_order_for_code);
1220 insn2
1221 = read_memory_unsigned_integer (pc + 6, 2, byte_order_for_code);
1222
1223 /* movt Rd, #const */
1224 if ((insn1 & 0xfbc0) == 0xf2c0)
1225 {
1226 high = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);
1227 *destreg = bits (insn2, 8, 11);
1228 *offset = 8;
1229 address = (high << 16 | low);
1230 }
1231 }
1232 }
1233 else
1234 {
1235 unsigned int insn
1236 = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
1237
1238 if ((insn & 0x0e5f0000) == 0x041f0000) /* ldr Rd, #immed */
1239 {
1240 address = bits (insn, 0, 11);
1241 *destreg = bits (insn, 12, 15);
1242 *offset = 4;
1243 }
1244 else if ((insn & 0x0ff00000) == 0x03000000) /* movw Rd, #const */
1245 {
1246 low = EXTRACT_MOVW_MOVT_IMM_A (insn);
1247
1248 insn
1249 = read_memory_unsigned_integer (pc + 4, 4, byte_order_for_code);
1250
1251 if ((insn & 0x0ff00000) == 0x03400000) /* movt Rd, #const */
1252 {
1253 high = EXTRACT_MOVW_MOVT_IMM_A (insn);
1254 *destreg = bits (insn, 12, 15);
1255 *offset = 8;
1256 address = (high << 16 | low);
1257 }
1258 }
1259 }
1260
1261 return address;
1262 }
1263
1264 /* Try to skip a sequence of instructions used for stack protector. If PC
1265 points to the first instruction of this sequence, return the address of
1266 first instruction after this sequence, otherwise, return original PC.
1267
1268 On arm, this sequence of instructions is composed of mainly three steps,
1269 Step 1: load symbol __stack_chk_guard,
1270 Step 2: load from address of __stack_chk_guard,
1271 Step 3: store it to somewhere else.
1272
1273 Usually, instructions on step 2 and step 3 are the same on various ARM
1274 architectures. On step 2, it is one instruction 'ldr Rx, [Rn, #0]', and
1275 on step 3, it is also one instruction 'str Rx, [r7, #immd]'. However,
1276 instructions in step 1 vary from different ARM architectures. On ARMv7,
1277 they are,
1278
1279 movw Rn, #:lower16:__stack_chk_guard
1280 movt Rn, #:upper16:__stack_chk_guard
1281
1282 On ARMv5t, it is,
1283
1284 ldr Rn, .Label
1285 ....
1286 .Label:
1287 .word __stack_chk_guard
1288
1289 Since ldr/str is a very popular instruction, we can't use them as
1290 'fingerprint' or 'signature' of stack protector sequence. Here we choose
1291 sequence {movw/movt, ldr}/ldr/str plus symbol __stack_chk_guard, if not
1292 stripped, as the 'fingerprint' of a stack protector code sequence. */
1293
1294 static CORE_ADDR
1295 arm_skip_stack_protector(CORE_ADDR pc, struct gdbarch *gdbarch)
1296 {
1297 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1298 unsigned int basereg;
1299 struct bound_minimal_symbol stack_chk_guard;
1300 int offset;
1301 int is_thumb = arm_pc_is_thumb (gdbarch, pc);
1302 CORE_ADDR addr;
1303
1304 /* Try to parse the instructions in Step 1. */
1305 addr = arm_analyze_load_stack_chk_guard (pc, gdbarch,
1306 &basereg, &offset);
1307 if (!addr)
1308 return pc;
1309
1310 stack_chk_guard = lookup_minimal_symbol_by_pc (addr);
1311 /* If name of symbol doesn't start with '__stack_chk_guard', this
1312 instruction sequence is not for stack protector. If symbol is
1313 removed, we conservatively think this sequence is for stack protector. */
1314 if (stack_chk_guard.minsym
1315 && strncmp (MSYMBOL_LINKAGE_NAME (stack_chk_guard.minsym),
1316 "__stack_chk_guard",
1317 strlen ("__stack_chk_guard")) != 0)
1318 return pc;
1319
1320 if (is_thumb)
1321 {
1322 unsigned int destreg;
1323 unsigned short insn
1324 = read_memory_unsigned_integer (pc + offset, 2, byte_order_for_code);
1325
1326 /* Step 2: ldr Rd, [Rn, #immed], encoding T1. */
1327 if ((insn & 0xf800) != 0x6800)
1328 return pc;
1329 if (bits (insn, 3, 5) != basereg)
1330 return pc;
1331 destreg = bits (insn, 0, 2);
1332
1333 insn = read_memory_unsigned_integer (pc + offset + 2, 2,
1334 byte_order_for_code);
1335 /* Step 3: str Rd, [Rn, #immed], encoding T1. */
1336 if ((insn & 0xf800) != 0x6000)
1337 return pc;
1338 if (destreg != bits (insn, 0, 2))
1339 return pc;
1340 }
1341 else
1342 {
1343 unsigned int destreg;
1344 unsigned int insn
1345 = read_memory_unsigned_integer (pc + offset, 4, byte_order_for_code);
1346
1347 /* Step 2: ldr Rd, [Rn, #immed], encoding A1. */
1348 if ((insn & 0x0e500000) != 0x04100000)
1349 return pc;
1350 if (bits (insn, 16, 19) != basereg)
1351 return pc;
1352 destreg = bits (insn, 12, 15);
1353 /* Step 3: str Rd, [Rn, #immed], encoding A1. */
1354 insn = read_memory_unsigned_integer (pc + offset + 4,
1355 4, byte_order_for_code);
1356 if ((insn & 0x0e500000) != 0x04000000)
1357 return pc;
1358 if (bits (insn, 12, 15) != destreg)
1359 return pc;
1360 }
1361 /* The size of total two instructions ldr/str is 4 on Thumb-2, while 8
1362 on arm. */
1363 if (is_thumb)
1364 return pc + offset + 4;
1365 else
1366 return pc + offset + 8;
1367 }
1368
1369 /* Advance the PC across any function entry prologue instructions to
1370 reach some "real" code.
1371
1372 The APCS (ARM Procedure Call Standard) defines the following
1373 prologue:
1374
1375 mov ip, sp
1376 [stmfd sp!, {a1,a2,a3,a4}]
1377 stmfd sp!, {...,fp,ip,lr,pc}
1378 [stfe f7, [sp, #-12]!]
1379 [stfe f6, [sp, #-12]!]
1380 [stfe f5, [sp, #-12]!]
1381 [stfe f4, [sp, #-12]!]
1382 sub fp, ip, #nn @@ nn == 20 or 4 depending on second insn. */
1383
static CORE_ADDR
arm_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned long inst;
  CORE_ADDR skip_pc;
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol table.
     If so, then return either PC, or the PC after the prologue, whichever
     is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
	= skip_prologue_using_sal (gdbarch, func_addr);
      struct symtab *s = find_pc_symtab (func_addr);

      /* Skip over any stack-protector guard-loading sequence that sits
	 at the line-table-derived prologue end, so the instruction
	 analysis below starts past it.  */
      if (post_prologue_pc)
	post_prologue_pc
	  = arm_skip_stack_protector (post_prologue_pc, gdbarch);

      /* GCC always emits a line note before the prologue and another
	 one after, even if the two are at the same address or on the
	 same line.  Take advantage of this so that we do not need to
	 know every instruction that might appear in the prologue.  We
	 will have producer information for most binaries; if it is
	 missing (e.g. for -gstabs), assume the GNU tools.  */
      if (post_prologue_pc
	  && (s == NULL
	      || s->producer == NULL
	      || strncmp (s->producer, "GNU ", sizeof ("GNU ") - 1) == 0
	      || strncmp (s->producer, "clang ", sizeof ("clang ") - 1) == 0))
	return post_prologue_pc;

      if (post_prologue_pc != 0)
	{
	  CORE_ADDR analyzed_limit;

	  /* For non-GCC compilers, make sure the entire line is an
	     acceptable prologue; GDB will round this function's
	     return value up to the end of the following line so we
	     can not skip just part of a line (and we do not want to).

	     RealView does not treat the prologue specially, but does
	     associate prologue code with the opening brace; so this
	     lets us skip the first line if we think it is the opening
	     brace.  */
	  if (arm_pc_is_thumb (gdbarch, func_addr))
	    analyzed_limit = thumb_analyze_prologue (gdbarch, func_addr,
						     post_prologue_pc, NULL);
	  else
	    analyzed_limit = arm_analyze_prologue (gdbarch, func_addr,
						   post_prologue_pc, NULL);

	  /* If the line table and the instruction analysis disagree,
	     trust neither and do not skip anything.  */
	  if (analyzed_limit != post_prologue_pc)
	    return func_addr;

	  return post_prologue_pc;
	}
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to provide
     that bound, then use an arbitrary large number as the upper bound.  */
  /* Like arm_scan_prologue, stop no later than pc + 64.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 64;          /* Magic.  */

  /* Check if this is Thumb code.  */
  if (arm_pc_is_thumb (gdbarch, pc))
    return thumb_analyze_prologue (gdbarch, pc, limit_pc, NULL);

  /* ARM mode: step through one instruction at a time, stopping at the
     first one that cannot belong to a prologue.  */
  for (skip_pc = pc; skip_pc < limit_pc; skip_pc += 4)
    {
      inst = read_memory_unsigned_integer (skip_pc, 4, byte_order_for_code);

      /* "mov ip, sp" is no longer a required part of the prologue.  */
      if (inst == 0xe1a0c00d)			/* mov ip, sp */
	continue;

      if ((inst & 0xfffff000) == 0xe28dc000)	/* add ip, sp #n */
	continue;

      if ((inst & 0xfffff000) == 0xe24dc000)	/* sub ip, sp #n */
	continue;

      /* Some prologues begin with "str lr, [sp, #-4]!".  */
      if (inst == 0xe52de004)			/* str lr, [sp, #-4]! */
	continue;

      if ((inst & 0xfffffff0) == 0xe92d0000)	/* stmfd sp!,{a1,a2,a3,a4} */
	continue;

      if ((inst & 0xfffff800) == 0xe92dd800)	/* stmfd sp!,{fp,ip,lr,pc} */
	continue;

      /* Any insns after this point may float into the code, if it makes
	 for better instruction scheduling, so we skip them only if we
	 find them, but still consider the function to be frame-ful.  */

      /* We may have either one sfmfd instruction here, or several stfe
	 insns, depending on the version of floating point code we
	 support.  */
      if ((inst & 0xffbf0fff) == 0xec2d0200)	/* sfmfd fn, <cnt>, [sp]! */
	continue;

      if ((inst & 0xffff8fff) == 0xed6d0103)	/* stfe fn, [sp, #-12]! */
	continue;

      if ((inst & 0xfffff000) == 0xe24cb000)	/* sub fp, ip, #nn */
	continue;

      if ((inst & 0xfffff000) == 0xe24dd000)	/* sub sp, sp, #nn */
	continue;

      if ((inst & 0xffffc000) == 0xe54b0000	/* strb r(0123),[r11,#-nn] */
	  || (inst & 0xffffc0f0) == 0xe14b00b0	/* strh r(0123),[r11,#-nn] */
	  || (inst & 0xffffc000) == 0xe50b0000)	/* str r(0123),[r11,#-nn] */
	continue;

      if ((inst & 0xffffc000) == 0xe5cd0000	/* strb r(0123),[sp,#nn] */
	  || (inst & 0xffffc0f0) == 0xe1cd00b0	/* strh r(0123),[sp,#nn] */
	  || (inst & 0xffffc000) == 0xe58d0000)	/* str r(0123),[sp,#nn] */
	continue;

      /* Un-recognized instruction; stop scanning.  */
      break;
    }

  return skip_pc;		/* End of prologue.  */
}
1521
1522 /* *INDENT-OFF* */
1523 /* Function: thumb_scan_prologue (helper function for arm_scan_prologue)
1524 This function decodes a Thumb function prologue to determine:
1525 1) the size of the stack frame
1526 2) which registers are saved on it
1527 3) the offsets of saved regs
1528 4) the offset from the stack pointer to the frame pointer
1529
1530 A typical Thumb function prologue would create this stack frame
1531 (offsets relative to FP)
1532 old SP -> 24 stack parameters
1533 20 LR
1534 16 R7
1535 R7 -> 0 local variables (16 bytes)
1536 SP -> -12 additional stack space (12 bytes)
1537 The frame size would thus be 36 bytes, and the frame offset would be
1538 12 bytes. The frame register is R7.
1539
1540 The comments for thumb_skip_prolog() describe the algorithm we use
1541 to detect the end of the prolog. */
1542 /* *INDENT-ON* */
1543
1544 static void
1545 thumb_scan_prologue (struct gdbarch *gdbarch, CORE_ADDR prev_pc,
1546 CORE_ADDR block_addr, struct arm_prologue_cache *cache)
1547 {
1548 CORE_ADDR prologue_start;
1549 CORE_ADDR prologue_end;
1550
1551 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1552 &prologue_end))
1553 {
1554 /* See comment in arm_scan_prologue for an explanation of
1555 this heuristics. */
1556 if (prologue_end > prologue_start + 64)
1557 {
1558 prologue_end = prologue_start + 64;
1559 }
1560 }
1561 else
1562 /* We're in the boondocks: we have no idea where the start of the
1563 function is. */
1564 return;
1565
1566 prologue_end = min (prologue_end, prev_pc);
1567
1568 thumb_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
1569 }
1570
1571 /* Return 1 if THIS_INSTR might change control flow, 0 otherwise. */
1572
1573 static int
1574 arm_instruction_changes_pc (uint32_t this_instr)
1575 {
1576 if (bits (this_instr, 28, 31) == INST_NV)
1577 /* Unconditional instructions. */
1578 switch (bits (this_instr, 24, 27))
1579 {
1580 case 0xa:
1581 case 0xb:
1582 /* Branch with Link and change to Thumb. */
1583 return 1;
1584 case 0xc:
1585 case 0xd:
1586 case 0xe:
1587 /* Coprocessor register transfer. */
1588 if (bits (this_instr, 12, 15) == 15)
1589 error (_("Invalid update to pc in instruction"));
1590 return 0;
1591 default:
1592 return 0;
1593 }
1594 else
1595 switch (bits (this_instr, 25, 27))
1596 {
1597 case 0x0:
1598 if (bits (this_instr, 23, 24) == 2 && bit (this_instr, 20) == 0)
1599 {
1600 /* Multiplies and extra load/stores. */
1601 if (bit (this_instr, 4) == 1 && bit (this_instr, 7) == 1)
1602 /* Neither multiplies nor extension load/stores are allowed
1603 to modify PC. */
1604 return 0;
1605
1606 /* Otherwise, miscellaneous instructions. */
1607
1608 /* BX <reg>, BXJ <reg>, BLX <reg> */
1609 if (bits (this_instr, 4, 27) == 0x12fff1
1610 || bits (this_instr, 4, 27) == 0x12fff2
1611 || bits (this_instr, 4, 27) == 0x12fff3)
1612 return 1;
1613
1614 /* Other miscellaneous instructions are unpredictable if they
1615 modify PC. */
1616 return 0;
1617 }
1618 /* Data processing instruction. Fall through. */
1619
1620 case 0x1:
1621 if (bits (this_instr, 12, 15) == 15)
1622 return 1;
1623 else
1624 return 0;
1625
1626 case 0x2:
1627 case 0x3:
1628 /* Media instructions and architecturally undefined instructions. */
1629 if (bits (this_instr, 25, 27) == 3 && bit (this_instr, 4) == 1)
1630 return 0;
1631
1632 /* Stores. */
1633 if (bit (this_instr, 20) == 0)
1634 return 0;
1635
1636 /* Loads. */
1637 if (bits (this_instr, 12, 15) == ARM_PC_REGNUM)
1638 return 1;
1639 else
1640 return 0;
1641
1642 case 0x4:
1643 /* Load/store multiple. */
1644 if (bit (this_instr, 20) == 1 && bit (this_instr, 15) == 1)
1645 return 1;
1646 else
1647 return 0;
1648
1649 case 0x5:
1650 /* Branch and branch with link. */
1651 return 1;
1652
1653 case 0x6:
1654 case 0x7:
1655 /* Coprocessor transfers or SWIs can not affect PC. */
1656 return 0;
1657
1658 default:
1659 internal_error (__FILE__, __LINE__, _("bad value in switch"));
1660 }
1661 }
1662
1663 /* Analyze an ARM mode prologue starting at PROLOGUE_START and
1664 continuing no further than PROLOGUE_END. If CACHE is non-NULL,
1665 fill it in. Return the first address not recognized as a prologue
1666 instruction.
1667
1668 We recognize all the instructions typically found in ARM prologues,
1669 plus harmless instructions which can be skipped (either for analysis
1670 purposes, or a more restrictive set that can be skipped when finding
1671 the end of the prologue). */
1672
1673 static CORE_ADDR
1674 arm_analyze_prologue (struct gdbarch *gdbarch,
1675 CORE_ADDR prologue_start, CORE_ADDR prologue_end,
1676 struct arm_prologue_cache *cache)
1677 {
1678 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1679 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1680 int regno;
1681 CORE_ADDR offset, current_pc;
1682 pv_t regs[ARM_FPS_REGNUM];
1683 struct pv_area *stack;
1684 struct cleanup *back_to;
1685 int framereg, framesize;
1686 CORE_ADDR unrecognized_pc = 0;
1687
1688 /* Search the prologue looking for instructions that set up the
1689 frame pointer, adjust the stack pointer, and save registers.
1690
1691 Be careful, however, and if it doesn't look like a prologue,
1692 don't try to scan it. If, for instance, a frameless function
1693 begins with stmfd sp!, then we will tell ourselves there is
1694 a frame, which will confuse stack traceback, as well as "finish"
1695 and other operations that rely on a knowledge of the stack
1696 traceback. */
1697
1698 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1699 regs[regno] = pv_register (regno, 0);
1700 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
1701 back_to = make_cleanup_free_pv_area (stack);
1702
1703 for (current_pc = prologue_start;
1704 current_pc < prologue_end;
1705 current_pc += 4)
1706 {
1707 unsigned int insn
1708 = read_memory_unsigned_integer (current_pc, 4, byte_order_for_code);
1709
1710 if (insn == 0xe1a0c00d) /* mov ip, sp */
1711 {
1712 regs[ARM_IP_REGNUM] = regs[ARM_SP_REGNUM];
1713 continue;
1714 }
1715 else if ((insn & 0xfff00000) == 0xe2800000 /* add Rd, Rn, #n */
1716 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1717 {
1718 unsigned imm = insn & 0xff; /* immediate value */
1719 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1720 int rd = bits (insn, 12, 15);
1721 imm = (imm >> rot) | (imm << (32 - rot));
1722 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], imm);
1723 continue;
1724 }
1725 else if ((insn & 0xfff00000) == 0xe2400000 /* sub Rd, Rn, #n */
1726 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1727 {
1728 unsigned imm = insn & 0xff; /* immediate value */
1729 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1730 int rd = bits (insn, 12, 15);
1731 imm = (imm >> rot) | (imm << (32 - rot));
1732 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], -imm);
1733 continue;
1734 }
1735 else if ((insn & 0xffff0fff) == 0xe52d0004) /* str Rd,
1736 [sp, #-4]! */
1737 {
1738 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1739 break;
1740 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1741 pv_area_store (stack, regs[ARM_SP_REGNUM], 4,
1742 regs[bits (insn, 12, 15)]);
1743 continue;
1744 }
1745 else if ((insn & 0xffff0000) == 0xe92d0000)
1746 /* stmfd sp!, {..., fp, ip, lr, pc}
1747 or
1748 stmfd sp!, {a1, a2, a3, a4} */
1749 {
1750 int mask = insn & 0xffff;
1751
1752 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1753 break;
1754
1755 /* Calculate offsets of saved registers. */
1756 for (regno = ARM_PC_REGNUM; regno >= 0; regno--)
1757 if (mask & (1 << regno))
1758 {
1759 regs[ARM_SP_REGNUM]
1760 = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1761 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
1762 }
1763 }
1764 else if ((insn & 0xffff0000) == 0xe54b0000 /* strb rx,[r11,#-n] */
1765 || (insn & 0xffff00f0) == 0xe14b00b0 /* strh rx,[r11,#-n] */
1766 || (insn & 0xffffc000) == 0xe50b0000) /* str rx,[r11,#-n] */
1767 {
1768 /* No need to add this to saved_regs -- it's just an arg reg. */
1769 continue;
1770 }
1771 else if ((insn & 0xffff0000) == 0xe5cd0000 /* strb rx,[sp,#n] */
1772 || (insn & 0xffff00f0) == 0xe1cd00b0 /* strh rx,[sp,#n] */
1773 || (insn & 0xffffc000) == 0xe58d0000) /* str rx,[sp,#n] */
1774 {
1775 /* No need to add this to saved_regs -- it's just an arg reg. */
1776 continue;
1777 }
1778 else if ((insn & 0xfff00000) == 0xe8800000 /* stm Rn,
1779 { registers } */
1780 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1781 {
1782 /* No need to add this to saved_regs -- it's just arg regs. */
1783 continue;
1784 }
1785 else if ((insn & 0xfffff000) == 0xe24cb000) /* sub fp, ip #n */
1786 {
1787 unsigned imm = insn & 0xff; /* immediate value */
1788 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1789 imm = (imm >> rot) | (imm << (32 - rot));
1790 regs[ARM_FP_REGNUM] = pv_add_constant (regs[ARM_IP_REGNUM], -imm);
1791 }
1792 else if ((insn & 0xfffff000) == 0xe24dd000) /* sub sp, sp #n */
1793 {
1794 unsigned imm = insn & 0xff; /* immediate value */
1795 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1796 imm = (imm >> rot) | (imm << (32 - rot));
1797 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -imm);
1798 }
1799 else if ((insn & 0xffff7fff) == 0xed6d0103 /* stfe f?,
1800 [sp, -#c]! */
1801 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1802 {
1803 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1804 break;
1805
1806 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1807 regno = ARM_F0_REGNUM + ((insn >> 12) & 0x07);
1808 pv_area_store (stack, regs[ARM_SP_REGNUM], 12, regs[regno]);
1809 }
1810 else if ((insn & 0xffbf0fff) == 0xec2d0200 /* sfmfd f0, 4,
1811 [sp!] */
1812 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1813 {
1814 int n_saved_fp_regs;
1815 unsigned int fp_start_reg, fp_bound_reg;
1816
1817 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1818 break;
1819
1820 if ((insn & 0x800) == 0x800) /* N0 is set */
1821 {
1822 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1823 n_saved_fp_regs = 3;
1824 else
1825 n_saved_fp_regs = 1;
1826 }
1827 else
1828 {
1829 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1830 n_saved_fp_regs = 2;
1831 else
1832 n_saved_fp_regs = 4;
1833 }
1834
1835 fp_start_reg = ARM_F0_REGNUM + ((insn >> 12) & 0x7);
1836 fp_bound_reg = fp_start_reg + n_saved_fp_regs;
1837 for (; fp_start_reg < fp_bound_reg; fp_start_reg++)
1838 {
1839 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1840 pv_area_store (stack, regs[ARM_SP_REGNUM], 12,
1841 regs[fp_start_reg++]);
1842 }
1843 }
1844 else if ((insn & 0xff000000) == 0xeb000000 && cache == NULL) /* bl */
1845 {
1846 /* Allow some special function calls when skipping the
1847 prologue; GCC generates these before storing arguments to
1848 the stack. */
1849 CORE_ADDR dest = BranchDest (current_pc, insn);
1850
1851 if (skip_prologue_function (gdbarch, dest, 0))
1852 continue;
1853 else
1854 break;
1855 }
1856 else if ((insn & 0xf0000000) != 0xe0000000)
1857 break; /* Condition not true, exit early. */
1858 else if (arm_instruction_changes_pc (insn))
1859 /* Don't scan past anything that might change control flow. */
1860 break;
1861 else if ((insn & 0xfe500000) == 0xe8100000 /* ldm */
1862 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1863 /* Ignore block loads from the stack, potentially copying
1864 parameters from memory. */
1865 continue;
1866 else if ((insn & 0xfc500000) == 0xe4100000
1867 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1868 /* Similarly ignore single loads from the stack. */
1869 continue;
1870 else if ((insn & 0xffff0ff0) == 0xe1a00000)
1871 /* MOV Rd, Rm. Skip register copies, i.e. saves to another
1872 register instead of the stack. */
1873 continue;
1874 else
1875 {
1876 /* The optimizer might shove anything into the prologue,
1877 so we just skip what we don't recognize. */
1878 unrecognized_pc = current_pc;
1879 continue;
1880 }
1881 }
1882
1883 if (unrecognized_pc == 0)
1884 unrecognized_pc = current_pc;
1885
1886 /* The frame size is just the distance from the frame register
1887 to the original stack pointer. */
1888 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1889 {
1890 /* Frame pointer is fp. */
1891 framereg = ARM_FP_REGNUM;
1892 framesize = -regs[ARM_FP_REGNUM].k;
1893 }
1894 else
1895 {
1896 /* Try the stack pointer... this is a bit desperate. */
1897 framereg = ARM_SP_REGNUM;
1898 framesize = -regs[ARM_SP_REGNUM].k;
1899 }
1900
1901 if (cache)
1902 {
1903 cache->framereg = framereg;
1904 cache->framesize = framesize;
1905
1906 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1907 if (pv_area_find_reg (stack, gdbarch, regno, &offset))
1908 cache->saved_regs[regno].addr = offset;
1909 }
1910
1911 if (arm_debug)
1912 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1913 paddress (gdbarch, unrecognized_pc));
1914
1915 do_cleanups (back_to);
1916 return unrecognized_pc;
1917 }
1918
1919 static void
1920 arm_scan_prologue (struct frame_info *this_frame,
1921 struct arm_prologue_cache *cache)
1922 {
1923 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1924 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1925 int regno;
1926 CORE_ADDR prologue_start, prologue_end, current_pc;
1927 CORE_ADDR prev_pc = get_frame_pc (this_frame);
1928 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
1929 pv_t regs[ARM_FPS_REGNUM];
1930 struct pv_area *stack;
1931 struct cleanup *back_to;
1932 CORE_ADDR offset;
1933
1934 /* Assume there is no frame until proven otherwise. */
1935 cache->framereg = ARM_SP_REGNUM;
1936 cache->framesize = 0;
1937
1938 /* Check for Thumb prologue. */
1939 if (arm_frame_is_thumb (this_frame))
1940 {
1941 thumb_scan_prologue (gdbarch, prev_pc, block_addr, cache);
1942 return;
1943 }
1944
1945 /* Find the function prologue. If we can't find the function in
1946 the symbol table, peek in the stack frame to find the PC. */
1947 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1948 &prologue_end))
1949 {
1950 /* One way to find the end of the prologue (which works well
1951 for unoptimized code) is to do the following:
1952
1953 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
1954
1955 if (sal.line == 0)
1956 prologue_end = prev_pc;
1957 else if (sal.end < prologue_end)
1958 prologue_end = sal.end;
1959
1960 This mechanism is very accurate so long as the optimizer
1961 doesn't move any instructions from the function body into the
1962 prologue. If this happens, sal.end will be the last
1963 instruction in the first hunk of prologue code just before
1964 the first instruction that the scheduler has moved from
1965 the body to the prologue.
1966
1967 In order to make sure that we scan all of the prologue
1968 instructions, we use a slightly less accurate mechanism which
1969 may scan more than necessary. To help compensate for this
1970 lack of accuracy, the prologue scanning loop below contains
1971 several clauses which'll cause the loop to terminate early if
1972 an implausible prologue instruction is encountered.
1973
1974 The expression
1975
1976 prologue_start + 64
1977
1978 is a suitable endpoint since it accounts for the largest
1979 possible prologue plus up to five instructions inserted by
1980 the scheduler. */
1981
1982 if (prologue_end > prologue_start + 64)
1983 {
1984 prologue_end = prologue_start + 64; /* See above. */
1985 }
1986 }
1987 else
1988 {
1989 /* We have no symbol information. Our only option is to assume this
1990 function has a standard stack frame and the normal frame register.
1991 Then, we can find the value of our frame pointer on entrance to
1992 the callee (or at the present moment if this is the innermost frame).
1993 The value stored there should be the address of the stmfd + 8. */
1994 CORE_ADDR frame_loc;
1995 LONGEST return_value;
1996
1997 frame_loc = get_frame_register_unsigned (this_frame, ARM_FP_REGNUM);
1998 if (!safe_read_memory_integer (frame_loc, 4, byte_order, &return_value))
1999 return;
2000 else
2001 {
2002 prologue_start = gdbarch_addr_bits_remove
2003 (gdbarch, return_value) - 8;
2004 prologue_end = prologue_start + 64; /* See above. */
2005 }
2006 }
2007
2008 if (prev_pc < prologue_end)
2009 prologue_end = prev_pc;
2010
2011 arm_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
2012 }
2013
2014 static struct arm_prologue_cache *
2015 arm_make_prologue_cache (struct frame_info *this_frame)
2016 {
2017 int reg;
2018 struct arm_prologue_cache *cache;
2019 CORE_ADDR unwound_fp;
2020
2021 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2022 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2023
2024 arm_scan_prologue (this_frame, cache);
2025
2026 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
2027 if (unwound_fp == 0)
2028 return cache;
2029
2030 cache->prev_sp = unwound_fp + cache->framesize;
2031
2032 /* Calculate actual addresses of saved registers using offsets
2033 determined by arm_scan_prologue. */
2034 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
2035 if (trad_frame_addr_p (cache->saved_regs, reg))
2036 cache->saved_regs[reg].addr += cache->prev_sp;
2037
2038 return cache;
2039 }
2040
2041 /* Our frame ID for a normal frame is the current function's starting PC
2042 and the caller's SP when we were called. */
2043
2044 static void
2045 arm_prologue_this_id (struct frame_info *this_frame,
2046 void **this_cache,
2047 struct frame_id *this_id)
2048 {
2049 struct arm_prologue_cache *cache;
2050 struct frame_id id;
2051 CORE_ADDR pc, func;
2052
2053 if (*this_cache == NULL)
2054 *this_cache = arm_make_prologue_cache (this_frame);
2055 cache = *this_cache;
2056
2057 /* This is meant to halt the backtrace at "_start". */
2058 pc = get_frame_pc (this_frame);
2059 if (pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
2060 return;
2061
2062 /* If we've hit a wall, stop. */
2063 if (cache->prev_sp == 0)
2064 return;
2065
2066 /* Use function start address as part of the frame ID. If we cannot
2067 identify the start address (due to missing symbol information),
2068 fall back to just using the current PC. */
2069 func = get_frame_func (this_frame);
2070 if (!func)
2071 func = pc;
2072
2073 id = frame_id_build (cache->prev_sp, func);
2074 *this_id = id;
2075 }
2076
2077 static struct value *
2078 arm_prologue_prev_register (struct frame_info *this_frame,
2079 void **this_cache,
2080 int prev_regnum)
2081 {
2082 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2083 struct arm_prologue_cache *cache;
2084
2085 if (*this_cache == NULL)
2086 *this_cache = arm_make_prologue_cache (this_frame);
2087 cache = *this_cache;
2088
2089 /* If we are asked to unwind the PC, then we need to return the LR
2090 instead. The prologue may save PC, but it will point into this
2091 frame's prologue, not the next frame's resume location. Also
2092 strip the saved T bit. A valid LR may have the low bit set, but
2093 a valid PC never does. */
2094 if (prev_regnum == ARM_PC_REGNUM)
2095 {
2096 CORE_ADDR lr;
2097
2098 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2099 return frame_unwind_got_constant (this_frame, prev_regnum,
2100 arm_addr_bits_remove (gdbarch, lr));
2101 }
2102
2103 /* SP is generally not saved to the stack, but this frame is
2104 identified by the next frame's stack pointer at the time of the call.
2105 The value was already reconstructed into PREV_SP. */
2106 if (prev_regnum == ARM_SP_REGNUM)
2107 return frame_unwind_got_constant (this_frame, prev_regnum, cache->prev_sp);
2108
2109 /* The CPSR may have been changed by the call instruction and by the
2110 called function. The only bit we can reconstruct is the T bit,
2111 by checking the low bit of LR as of the call. This is a reliable
2112 indicator of Thumb-ness except for some ARM v4T pre-interworking
2113 Thumb code, which could get away with a clear low bit as long as
2114 the called function did not use bx. Guess that all other
2115 bits are unchanged; the condition flags are presumably lost,
2116 but the processor status is likely valid. */
2117 if (prev_regnum == ARM_PS_REGNUM)
2118 {
2119 CORE_ADDR lr, cpsr;
2120 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
2121
2122 cpsr = get_frame_register_unsigned (this_frame, prev_regnum);
2123 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2124 if (IS_THUMB_ADDR (lr))
2125 cpsr |= t_bit;
2126 else
2127 cpsr &= ~t_bit;
2128 return frame_unwind_got_constant (this_frame, prev_regnum, cpsr);
2129 }
2130
2131 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
2132 prev_regnum);
2133 }
2134
/* Prologue-analysis based unwinder for normal ARM frames; used when no
   better unwind information (e.g. DWARF CFI or exception tables) is
   available.  */
struct frame_unwind arm_prologue_unwind = {
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  arm_prologue_this_id,
  arm_prologue_prev_register,
  NULL,
  default_frame_sniffer
};
2143
2144 /* Maintain a list of ARM exception table entries per objfile, similar to the
2145 list of mapping symbols. We only cache entries for standard ARM-defined
2146 personality routines; the cache will contain only the frame unwinding
2147 instructions associated with the entry (not the descriptors). */
2148
/* Registry key for the per-objfile exception table cache.  */
static const struct objfile_data *arm_exidx_data_key;

/* One cached exception table entry: the section-relative start address
   of the region it covers, and the normalized unwind instructions
   (NULL when the entry is EXIDX_CANTUNWIND or has no unwind data).  */
struct arm_exidx_entry
{
  bfd_vma addr;
  gdb_byte *entry;
};
typedef struct arm_exidx_entry arm_exidx_entry_s;
DEF_VEC_O(arm_exidx_entry_s);

/* Per-objfile cache: one vector of entries per BFD section, indexed by
   the section's index.  */
struct arm_exidx_data
{
  VEC(arm_exidx_entry_s) **section_maps;
};
2163
2164 static void
2165 arm_exidx_data_free (struct objfile *objfile, void *arg)
2166 {
2167 struct arm_exidx_data *data = arg;
2168 unsigned int i;
2169
2170 for (i = 0; i < objfile->obfd->section_count; i++)
2171 VEC_free (arm_exidx_entry_s, data->section_maps[i]);
2172 }
2173
2174 static inline int
2175 arm_compare_exidx_entries (const struct arm_exidx_entry *lhs,
2176 const struct arm_exidx_entry *rhs)
2177 {
2178 return lhs->addr < rhs->addr;
2179 }
2180
2181 static struct obj_section *
2182 arm_obj_section_from_vma (struct objfile *objfile, bfd_vma vma)
2183 {
2184 struct obj_section *osect;
2185
2186 ALL_OBJFILE_OSECTIONS (objfile, osect)
2187 if (bfd_get_section_flags (objfile->obfd,
2188 osect->the_bfd_section) & SEC_ALLOC)
2189 {
2190 bfd_vma start, size;
2191 start = bfd_get_section_vma (objfile->obfd, osect->the_bfd_section);
2192 size = bfd_get_section_size (osect->the_bfd_section);
2193
2194 if (start <= vma && vma < start + size)
2195 return osect;
2196 }
2197
2198 return NULL;
2199 }
2200
2201 /* Parse contents of exception table and exception index sections
2202 of OBJFILE, and fill in the exception table entry cache.
2203
2204 For each entry that refers to a standard ARM-defined personality
2205 routine, extract the frame unwinding instructions (from either
2206 the index or the table section). The unwinding instructions
2207 are normalized by:
2208 - extracting them from the rest of the table data
2209 - converting to host endianness
2210 - appending the implicit 0xb0 ("Finish") code
2211
2212 The extracted and normalized instructions are stored for later
2213 retrieval by the arm_find_exidx_entry routine. */
2214
static void
arm_exidx_new_objfile (struct objfile *objfile)
{
  struct cleanup *cleanups;
  struct arm_exidx_data *data;
  asection *exidx, *extab;
  bfd_vma exidx_vma = 0, extab_vma = 0;
  bfd_size_type exidx_size = 0, extab_size = 0;
  gdb_byte *exidx_data = NULL, *extab_data = NULL;
  LONGEST i;

  /* If we've already touched this file, do nothing.  */
  if (!objfile || objfile_data (objfile, arm_exidx_data_key) != NULL)
    return;
  cleanups = make_cleanup (null_cleanup, NULL);

  /* Read contents of exception table and index.  */
  exidx = bfd_get_section_by_name (objfile->obfd, ".ARM.exidx");
  if (exidx)
    {
      exidx_vma = bfd_section_vma (objfile->obfd, exidx);
      exidx_size = bfd_get_section_size (exidx);
      exidx_data = xmalloc (exidx_size);
      make_cleanup (xfree, exidx_data);

      if (!bfd_get_section_contents (objfile->obfd, exidx,
				     exidx_data, 0, exidx_size))
	{
	  do_cleanups (cleanups);
	  return;
	}
    }

  extab = bfd_get_section_by_name (objfile->obfd, ".ARM.extab");
  if (extab)
    {
      extab_vma = bfd_section_vma (objfile->obfd, extab);
      extab_size = bfd_get_section_size (extab);
      extab_data = xmalloc (extab_size);
      make_cleanup (xfree, extab_data);

      if (!bfd_get_section_contents (objfile->obfd, extab,
				     extab_data, 0, extab_size))
	{
	  do_cleanups (cleanups);
	  return;
	}
    }

  /* Allocate exception table data structure.  */
  data = OBSTACK_ZALLOC (&objfile->objfile_obstack, struct arm_exidx_data);
  set_objfile_data (objfile, arm_exidx_data_key, data);
  data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
				       objfile->obfd->section_count,
				       VEC(arm_exidx_entry_s) *);

  /* Fill in exception table.  Each index entry is a pair of 32-bit
     words: a self-relative (prel31) offset to the function start, and
     either an inline unwind description or an offset into extab.  */
  for (i = 0; i < exidx_size / 8; i++)
    {
      struct arm_exidx_entry new_exidx_entry;
      bfd_vma idx = bfd_h_get_32 (objfile->obfd, exidx_data + i * 8);
      bfd_vma val = bfd_h_get_32 (objfile->obfd, exidx_data + i * 8 + 4);
      bfd_vma addr = 0, word = 0;
      int n_bytes = 0, n_words = 0;
      struct obj_section *sec;
      gdb_byte *entry = NULL;

      /* Extract address of start of function.  The xor/subtract pair
	 sign-extends the 31-bit prel31 offset from bit 30.  */
      idx = ((idx & 0x7fffffff) ^ 0x40000000) - 0x40000000;
      idx += exidx_vma + i * 8;

      /* Find section containing function and compute section offset.  */
      sec = arm_obj_section_from_vma (objfile, idx);
      if (sec == NULL)
	continue;
      idx -= bfd_get_section_vma (objfile->obfd, sec->the_bfd_section);

      /* Determine address of exception table entry.  */
      if (val == 1)
	{
	  /* EXIDX_CANTUNWIND -- no exception table entry present.
	     ENTRY stays NULL.  */
	}
      else if ((val & 0xff000000) == 0x80000000)
	{
	  /* Exception table entry embedded in .ARM.exidx
	     -- must be short form.  */
	  word = val;
	  n_bytes = 3;
	}
      else if (!(val & 0x80000000))
	{
	  /* Exception table entry in .ARM.extab.  */
	  addr = ((val & 0x7fffffff) ^ 0x40000000) - 0x40000000;
	  addr += exidx_vma + i * 8 + 4;

	  if (addr >= extab_vma && addr + 4 <= extab_vma + extab_size)
	    {
	      word = bfd_h_get_32 (objfile->obfd,
				   extab_data + addr - extab_vma);
	      addr += 4;

	      if ((word & 0xff000000) == 0x80000000)
		{
		  /* Short form.  */
		  n_bytes = 3;
		}
	      else if ((word & 0xff000000) == 0x81000000
		       || (word & 0xff000000) == 0x82000000)
		{
		  /* Long form.  */
		  n_bytes = 2;
		  n_words = ((word >> 16) & 0xff);
		}
	      else if (!(word & 0x80000000))
		{
		  bfd_vma pers;
		  struct obj_section *pers_sec;
		  int gnu_personality = 0;

		  /* Custom personality routine.  */
		  pers = ((word & 0x7fffffff) ^ 0x40000000) - 0x40000000;
		  pers = UNMAKE_THUMB_ADDR (pers + addr - 4);

		  /* Check whether we've got one of the variants of the
		     GNU personality routines.  */
		  pers_sec = arm_obj_section_from_vma (objfile, pers);
		  if (pers_sec)
		    {
		      static const char *personality[] =
			{
			  "__gcc_personality_v0",
			  "__gxx_personality_v0",
			  "__gcj_personality_v0",
			  "__gnu_objc_personality_v0",
			  NULL
			};

		      CORE_ADDR pc = pers + obj_section_offset (pers_sec);
		      int k;

		      for (k = 0; personality[k]; k++)
			if (lookup_minimal_symbol_by_pc_name
			      (pc, personality[k], objfile))
			  {
			    gnu_personality = 1;
			    break;
			  }
		    }

		  /* If so, the next word contains a word count in the high
		     byte, followed by the same unwind instructions as the
		     pre-defined forms.  */
		  if (gnu_personality
		      && addr + 4 <= extab_vma + extab_size)
		    {
		      word = bfd_h_get_32 (objfile->obfd,
					   extab_data + addr - extab_vma);
		      addr += 4;
		      n_bytes = 3;
		      n_words = ((word >> 24) & 0xff);
		    }
		}
	    }
	}

      /* Sanity check address.  */
      if (n_words)
	if (addr < extab_vma || addr + 4 * n_words > extab_vma + extab_size)
	  n_words = n_bytes = 0;

      /* The unwind instructions reside in WORD (only the N_BYTES least
	 significant bytes are valid), followed by N_WORDS words in the
	 extab section starting at ADDR.  */
      if (n_bytes || n_words)
	{
	  gdb_byte *p = entry = obstack_alloc (&objfile->objfile_obstack,
					       n_bytes + n_words * 4 + 1);

	  /* Copy the embedded bytes most-significant first; N_BYTES is
	     decremented in the condition, so the shift count is correct
	     inside the body.  */
	  while (n_bytes--)
	    *p++ = (gdb_byte) ((word >> (8 * n_bytes)) & 0xff);

	  while (n_words--)
	    {
	      word = bfd_h_get_32 (objfile->obfd,
				   extab_data + addr - extab_vma);
	      addr += 4;

	      *p++ = (gdb_byte) ((word >> 24) & 0xff);
	      *p++ = (gdb_byte) ((word >> 16) & 0xff);
	      *p++ = (gdb_byte) ((word >> 8) & 0xff);
	      *p++ = (gdb_byte) (word & 0xff);
	    }

	  /* Implied "Finish" to terminate the list.  */
	  *p++ = 0xb0;
	}

      /* Push entry onto vector.  They are guaranteed to always
	 appear in order of increasing addresses.  */
      new_exidx_entry.addr = idx;
      new_exidx_entry.entry = entry;
      VEC_safe_push (arm_exidx_entry_s,
		     data->section_maps[sec->the_bfd_section->index],
		     &new_exidx_entry);
    }

  do_cleanups (cleanups);
}
2423
2424 /* Search for the exception table entry covering MEMADDR. If one is found,
2425 return a pointer to its data. Otherwise, return 0. If START is non-NULL,
2426 set *START to the start of the region covered by this entry. */
2427
static gdb_byte *
arm_find_exidx_entry (CORE_ADDR memaddr, CORE_ADDR *start)
{
  struct obj_section *sec;

  sec = find_pc_section (memaddr);
  if (sec != NULL)
    {
      struct arm_exidx_data *data;
      VEC(arm_exidx_entry_s) *map;
      /* Addresses in the per-section map are section-relative, so
	 convert MEMADDR before searching.  */
      struct arm_exidx_entry map_key = { memaddr - obj_section_addr (sec), 0 };
      unsigned int idx;

      data = objfile_data (sec->objfile, arm_exidx_data_key);
      if (data != NULL)
	{
	  map = data->section_maps[sec->the_bfd_section->index];
	  if (!VEC_empty (arm_exidx_entry_s, map))
	    {
	      struct arm_exidx_entry *map_sym;

	      idx = VEC_lower_bound (arm_exidx_entry_s, map, &map_key,
				     arm_compare_exidx_entries);

	      /* VEC_lower_bound finds the earliest ordered insertion
		 point.  If the following symbol starts at this exact
		 address, we use that; otherwise, the preceding
		 exception table entry covers this address.  */
	      if (idx < VEC_length (arm_exidx_entry_s, map))
		{
		  map_sym = VEC_index (arm_exidx_entry_s, map, idx);
		  if (map_sym->addr == map_key.addr)
		    {
		      if (start)
			*start = map_sym->addr + obj_section_addr (sec);
		      return map_sym->entry;
		    }
		}

	      if (idx > 0)
		{
		  map_sym = VEC_index (arm_exidx_entry_s, map, idx - 1);
		  if (start)
		    *start = map_sym->addr + obj_section_addr (sec);
		  return map_sym->entry;
		}
	    }
	}
    }

  /* No section, no cached table data, or empty map.  */
  return NULL;
}
2480
2481 /* Given the current frame THIS_FRAME, and its associated frame unwinding
2482 instruction list from the ARM exception table entry ENTRY, allocate and
2483 return a prologue cache structure describing how to unwind this frame.
2484
2485 Return NULL if the unwinding instruction list contains a "spare",
2486 "reserved" or "refuse to unwind" instruction as defined in section
2487 "9.3 Frame unwinding instructions" of the "Exception Handling ABI
2488 for the ARM Architecture" document. */
2489
static struct arm_prologue_cache *
arm_exidx_fill_cache (struct frame_info *this_frame, gdb_byte *entry)
{
  /* VSP is the "virtual stack pointer" of the EHABI unwinding model;
     it starts at the frame's SP and is adjusted as instructions are
     interpreted.  */
  CORE_ADDR vsp = 0;
  int vsp_valid = 0;

  struct arm_prologue_cache *cache;
  cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);

  for (;;)
    {
      gdb_byte insn;

      /* Whenever we reload SP, we actually have to retrieve its
	 actual value in the current frame.  */
      if (!vsp_valid)
	{
	  if (trad_frame_realreg_p (cache->saved_regs, ARM_SP_REGNUM))
	    {
	      int reg = cache->saved_regs[ARM_SP_REGNUM].realreg;
	      vsp = get_frame_register_unsigned (this_frame, reg);
	    }
	  else
	    {
	      CORE_ADDR addr = cache->saved_regs[ARM_SP_REGNUM].addr;
	      vsp = get_frame_memory_unsigned (this_frame, addr, 4);
	    }

	  vsp_valid = 1;
	}

      /* Decode next unwind instruction.  */
      insn = *entry++;

      /* 00xxxxxx: vsp = vsp + (xxxxxx << 2) + 4.  */
      if ((insn & 0xc0) == 0)
	{
	  int offset = insn & 0x3f;
	  vsp += (offset << 2) + 4;
	}
      /* 01xxxxxx: vsp = vsp - (xxxxxx << 2) - 4.  */
      else if ((insn & 0xc0) == 0x40)
	{
	  int offset = insn & 0x3f;
	  vsp -= (offset << 2) + 4;
	}
      else if ((insn & 0xf0) == 0x80)
	{
	  int mask = ((insn & 0xf) << 8) | *entry++;
	  int i;

	  /* The special case of an all-zero mask identifies
	     "Refuse to unwind".  We return NULL to fall back
	     to the prologue analyzer.  */
	  if (mask == 0)
	    return NULL;

	  /* Pop registers r4..r15 under mask.  */
	  for (i = 0; i < 12; i++)
	    if (mask & (1 << i))
	      {
		cache->saved_regs[4 + i].addr = vsp;
		vsp += 4;
	      }

	  /* Special-case popping SP -- we need to reload vsp.  */
	  if (mask & (1 << (ARM_SP_REGNUM - 4)))
	    vsp_valid = 0;
	}
      else if ((insn & 0xf0) == 0x90)
	{
	  int reg = insn & 0xf;

	  /* Reserved cases.  */
	  if (reg == ARM_SP_REGNUM || reg == ARM_PC_REGNUM)
	    return NULL;

	  /* Set SP from another register and mark VSP for reload.  */
	  cache->saved_regs[ARM_SP_REGNUM] = cache->saved_regs[reg];
	  vsp_valid = 0;
	}
      else if ((insn & 0xf0) == 0xa0)
	{
	  int count = insn & 0x7;
	  int pop_lr = (insn & 0x8) != 0;
	  int i;

	  /* Pop r4..r[4+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[4 + i].addr = vsp;
	      vsp += 4;
	    }

	  /* If indicated by flag, pop LR as well.  */
	  if (pop_lr)
	    {
	      cache->saved_regs[ARM_LR_REGNUM].addr = vsp;
	      vsp += 4;
	    }
	}
      else if (insn == 0xb0)
	{
	  /* We could only have updated PC by popping into it; if so, it
	     will show up as address.  Otherwise, copy LR into PC.  */
	  if (!trad_frame_addr_p (cache->saved_regs, ARM_PC_REGNUM))
	    cache->saved_regs[ARM_PC_REGNUM]
	      = cache->saved_regs[ARM_LR_REGNUM];

	  /* We're done.  */
	  break;
	}
      else if (insn == 0xb1)
	{
	  int mask = *entry++;
	  int i;

	  /* All-zero mask and mask >= 16 is "spare".  */
	  if (mask == 0 || mask >= 16)
	    return NULL;

	  /* Pop r0..r3 under mask.  */
	  for (i = 0; i < 4; i++)
	    if (mask & (1 << i))
	      {
		cache->saved_regs[i].addr = vsp;
		vsp += 4;
	      }
	}
      else if (insn == 0xb2)
	{
	  /* Large stack increment: 0xb2 is followed by a ULEB128-encoded
	     offset; vsp = vsp + 0x204 + (offset << 2).  */
	  ULONGEST offset = 0;
	  unsigned shift = 0;

	  do
	    {
	      offset |= (*entry & 0x7f) << shift;
	      shift += 7;
	    }
	  while (*entry++ & 0x80);

	  vsp += 0x204 + (offset << 2);
	}
      else if (insn == 0xb3)
	{
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Only registers D0..D15 are valid here.  */
	  if (start + count >= 16)
	    return NULL;

	  /* Pop VFP double-precision registers D[start]..D[start+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + start + i].addr = vsp;
	      vsp += 8;
	    }

	  /* Add an extra 4 bytes for FSTMFDX-style stack.  */
	  vsp += 4;
	}
      else if ((insn & 0xf8) == 0xb8)
	{
	  int count = insn & 0x7;
	  int i;

	  /* Pop VFP double-precision registers D[8]..D[8+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + 8 + i].addr = vsp;
	      vsp += 8;
	    }

	  /* Add an extra 4 bytes for FSTMFDX-style stack.  */
	  vsp += 4;
	}
      /* NB: 0xc6/0xc7 must be tested before the 0xc0-0xc5 mask case
	 below, since they also match (insn & 0xf8) == 0xc0.  */
      else if (insn == 0xc6)
	{
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Only registers WR0..WR15 are valid.  */
	  if (start + count >= 16)
	    return NULL;

	  /* Pop iwmmx registers WR[start]..WR[start+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_WR0_REGNUM + start + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else if (insn == 0xc7)
	{
	  int mask = *entry++;
	  int i;

	  /* All-zero mask and mask >= 16 is "spare".  */
	  if (mask == 0 || mask >= 16)
	    return NULL;

	  /* Pop iwmmx general-purpose registers WCGR0..WCGR3 under mask.  */
	  for (i = 0; i < 4; i++)
	    if (mask & (1 << i))
	      {
		cache->saved_regs[ARM_WCGR0_REGNUM + i].addr = vsp;
		vsp += 4;
	      }
	}
      else if ((insn & 0xf8) == 0xc0)
	{
	  int count = insn & 0x7;
	  int i;

	  /* Pop iwmmx registers WR[10]..WR[10+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_WR0_REGNUM + 10 + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else if (insn == 0xc8)
	{
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Only registers D0..D31 are valid.  */
	  if (start + count >= 16)
	    return NULL;

	  /* Pop VFP double-precision registers
	     D[16+start]..D[16+start+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + 16 + start + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else if (insn == 0xc9)
	{
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Pop VFP double-precision registers D[start]..D[start+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + start + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else if ((insn & 0xf8) == 0xd0)
	{
	  int count = insn & 0x7;
	  int i;

	  /* Pop VFP double-precision registers D[8]..D[8+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + 8 + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else
	{
	  /* Everything else is "spare".  */
	  return NULL;
	}
    }

  /* If we restore SP from a register, assume this was the frame register.
     Otherwise just fall back to SP as frame register.  */
  if (trad_frame_realreg_p (cache->saved_regs, ARM_SP_REGNUM))
    cache->framereg = cache->saved_regs[ARM_SP_REGNUM].realreg;
  else
    cache->framereg = ARM_SP_REGNUM;

  /* Determine offset to previous frame.  */
  cache->framesize
    = vsp - get_frame_register_unsigned (this_frame, cache->framereg);

  /* We already got the previous SP.  */
  cache->prev_sp = vsp;

  return cache;
}
2779
2780 /* Unwinding via ARM exception table entries. Note that the sniffer
2781 already computes a filled-in prologue cache, which is then used
2782 with the same arm_prologue_this_id and arm_prologue_prev_register
2783 routines also used for prologue-parsing based unwinding. */
2784
static int
arm_exidx_unwind_sniffer (const struct frame_unwind *self,
			  struct frame_info *this_frame,
			  void **this_prologue_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  CORE_ADDR addr_in_block, exidx_region, func_start;
  struct arm_prologue_cache *cache;
  gdb_byte *entry;

  /* See if we have an ARM exception table entry covering this address.  */
  addr_in_block = get_frame_address_in_block (this_frame);
  entry = arm_find_exidx_entry (addr_in_block, &exidx_region);
  if (!entry)
    return 0;

  /* The ARM exception table does not describe unwind information
     for arbitrary PC values, but is guaranteed to be correct only
     at call sites.  We have to decide here whether we want to use
     ARM exception table information for this frame, or fall back
     to using prologue parsing.  (Note that if we have DWARF CFI,
     this sniffer isn't even called -- CFI is always preferred.)

     Before we make this decision, however, we check whether we
     actually have *symbol* information for the current frame.
     If not, prologue parsing would not work anyway, so we might
     as well use the exception table and hope for the best.  */
  if (find_pc_partial_function (addr_in_block, NULL, &func_start, NULL))
    {
      int exc_valid = 0;

      /* If the next frame is "normal", we are at a call site in this
	 frame, so exception information is guaranteed to be valid.  */
      if (get_next_frame (this_frame)
	  && get_frame_type (get_next_frame (this_frame)) == NORMAL_FRAME)
	exc_valid = 1;

      /* We also assume exception information is valid if we're currently
	 blocked in a system call.  The system library is supposed to
	 ensure this, so that e.g. pthread cancellation works.

	 The check reads the instruction just before PC and tests for
	 an "svc" in the appropriate (Thumb or ARM) encoding.  */
      if (arm_frame_is_thumb (this_frame))
	{
	  LONGEST insn;

	  if (safe_read_memory_integer (get_frame_pc (this_frame) - 2, 2,
					byte_order_for_code, &insn)
	      && (insn & 0xff00) == 0xdf00 /* svc */)
	    exc_valid = 1;
	}
      else
	{
	  LONGEST insn;

	  if (safe_read_memory_integer (get_frame_pc (this_frame) - 4, 4,
					byte_order_for_code, &insn)
	      && (insn & 0x0f000000) == 0x0f000000 /* svc */)
	    exc_valid = 1;
	}

      /* Bail out if we don't know that exception information is valid.  */
      if (!exc_valid)
	return 0;

      /* The ARM exception index does not mark the *end* of the region
	 covered by the entry, and some functions will not have any entry.
	 To correctly recognize the end of the covered region, the linker
	 should have inserted dummy records with a CANTUNWIND marker.

	 Unfortunately, current versions of GNU ld do not reliably do
	 this, and thus we may have found an incorrect entry above.
	 As a (temporary) sanity check, we only use the entry if it
	 lies *within* the bounds of the function.  Note that this check
	 might reject perfectly valid entries that just happen to cover
	 multiple functions; therefore this check ought to be removed
	 once the linker is fixed.  */
      if (func_start > exidx_region)
	return 0;
    }

  /* Decode the list of unwinding instructions into a prologue cache.
     Note that this may fail due to e.g. a "refuse to unwind" code.  */
  cache = arm_exidx_fill_cache (this_frame, entry);
  if (!cache)
    return 0;

  *this_prologue_cache = cache;
  return 1;
}
2874
/* Unwinder based on ARM exception table (.ARM.exidx) entries.  The
   sniffer computes a filled-in prologue cache, so the same
   arm_prologue_this_id and arm_prologue_prev_register routines used
   for prologue-based unwinding can be reused here.  */
struct frame_unwind arm_exidx_unwind = {
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  arm_prologue_this_id,
  arm_prologue_prev_register,
  NULL,
  arm_exidx_unwind_sniffer
};
2883
2884 /* Recognize GCC's trampoline for thumb call-indirect. If we are in a
2885 trampoline, return the target PC. Otherwise return 0.
2886
2887 void call0a (char c, short s, int i, long l) {}
2888
2889 int main (void)
2890 {
2891 (*pointer_to_call0a) (c, s, i, l);
2892 }
2893
2894 Instead of calling a stub library function _call_via_xx (xx is
2895 the register name), GCC may inline the trampoline in the object
2896 file as below (register r2 has the address of call0a).
2897
2898 .global main
2899 .type main, %function
2900 ...
2901 bl .L1
2902 ...
2903 .size main, .-main
2904
2905 .L1:
2906 bx r2
2907
2908 The trampoline 'bx r2' doesn't belong to main. */
2909
2910 static CORE_ADDR
2911 arm_skip_bx_reg (struct frame_info *frame, CORE_ADDR pc)
2912 {
2913 /* The heuristics of recognizing such trampoline is that FRAME is
2914 executing in Thumb mode and the instruction on PC is 'bx Rm'. */
2915 if (arm_frame_is_thumb (frame))
2916 {
2917 gdb_byte buf[2];
2918
2919 if (target_read_memory (pc, buf, 2) == 0)
2920 {
2921 struct gdbarch *gdbarch = get_frame_arch (frame);
2922 enum bfd_endian byte_order_for_code
2923 = gdbarch_byte_order_for_code (gdbarch);
2924 uint16_t insn
2925 = extract_unsigned_integer (buf, 2, byte_order_for_code);
2926
2927 if ((insn & 0xff80) == 0x4700) /* bx <Rm> */
2928 {
2929 CORE_ADDR dest
2930 = get_frame_register_unsigned (frame, bits (insn, 3, 6));
2931
2932 /* Clear the LSB so that gdb core sets step-resume
2933 breakpoint at the right address. */
2934 return UNMAKE_THUMB_ADDR (dest);
2935 }
2936 }
2937 }
2938
2939 return 0;
2940 }
2941
2942 static struct arm_prologue_cache *
2943 arm_make_stub_cache (struct frame_info *this_frame)
2944 {
2945 struct arm_prologue_cache *cache;
2946
2947 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2948 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2949
2950 cache->prev_sp = get_frame_register_unsigned (this_frame, ARM_SP_REGNUM);
2951
2952 return cache;
2953 }
2954
2955 /* Our frame ID for a stub frame is the current SP and LR. */
2956
2957 static void
2958 arm_stub_this_id (struct frame_info *this_frame,
2959 void **this_cache,
2960 struct frame_id *this_id)
2961 {
2962 struct arm_prologue_cache *cache;
2963
2964 if (*this_cache == NULL)
2965 *this_cache = arm_make_stub_cache (this_frame);
2966 cache = *this_cache;
2967
2968 *this_id = frame_id_build (cache->prev_sp, get_frame_pc (this_frame));
2969 }
2970
2971 static int
2972 arm_stub_unwind_sniffer (const struct frame_unwind *self,
2973 struct frame_info *this_frame,
2974 void **this_prologue_cache)
2975 {
2976 CORE_ADDR addr_in_block;
2977 gdb_byte dummy[4];
2978 CORE_ADDR pc, start_addr;
2979 const char *name;
2980
2981 addr_in_block = get_frame_address_in_block (this_frame);
2982 pc = get_frame_pc (this_frame);
2983 if (in_plt_section (addr_in_block)
2984 /* We also use the stub winder if the target memory is unreadable
2985 to avoid having the prologue unwinder trying to read it. */
2986 || target_read_memory (pc, dummy, 4) != 0)
2987 return 1;
2988
2989 if (find_pc_partial_function (pc, &name, &start_addr, NULL) == 0
2990 && arm_skip_bx_reg (this_frame, pc) != 0)
2991 return 1;
2992
2993 return 0;
2994 }
2995
/* Unwinder for stub frames (PLT entries, unreadable code, inlined
   'bx <reg>' trampolines).  Assumes no frame has been set up, so the
   frame ID is built from the current SP and PC.  */
struct frame_unwind arm_stub_unwind = {
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  arm_stub_this_id,
  arm_prologue_prev_register,
  NULL,
  arm_stub_unwind_sniffer
};
3004
3005 /* Put here the code to store, into CACHE->saved_regs, the addresses
3006 of the saved registers of frame described by THIS_FRAME. CACHE is
3007 returned. */
3008
3009 static struct arm_prologue_cache *
3010 arm_m_exception_cache (struct frame_info *this_frame)
3011 {
3012 struct gdbarch *gdbarch = get_frame_arch (this_frame);
3013 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3014 struct arm_prologue_cache *cache;
3015 CORE_ADDR unwound_sp;
3016 LONGEST xpsr;
3017
3018 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
3019 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
3020
3021 unwound_sp = get_frame_register_unsigned (this_frame,
3022 ARM_SP_REGNUM);
3023
3024 /* The hardware saves eight 32-bit words, comprising xPSR,
3025 ReturnAddress, LR (R14), R12, R3, R2, R1, R0. See details in
3026 "B1.5.6 Exception entry behavior" in
3027 "ARMv7-M Architecture Reference Manual". */
3028 cache->saved_regs[0].addr = unwound_sp;
3029 cache->saved_regs[1].addr = unwound_sp + 4;
3030 cache->saved_regs[2].addr = unwound_sp + 8;
3031 cache->saved_regs[3].addr = unwound_sp + 12;
3032 cache->saved_regs[12].addr = unwound_sp + 16;
3033 cache->saved_regs[14].addr = unwound_sp + 20;
3034 cache->saved_regs[15].addr = unwound_sp + 24;
3035 cache->saved_regs[ARM_PS_REGNUM].addr = unwound_sp + 28;
3036
3037 /* If bit 9 of the saved xPSR is set, then there is a four-byte
3038 aligner between the top of the 32-byte stack frame and the
3039 previous context's stack pointer. */
3040 cache->prev_sp = unwound_sp + 32;
3041 if (safe_read_memory_integer (unwound_sp + 28, 4, byte_order, &xpsr)
3042 && (xpsr & (1 << 9)) != 0)
3043 cache->prev_sp += 4;
3044
3045 return cache;
3046 }
3047
/* Implementation of function hook 'this_id' in
   'struct frame_unwind' for the M-profile exception unwinder.  */

static void
arm_m_exception_this_id (struct frame_info *this_frame,
			 void **this_cache,
			 struct frame_id *this_id)
{
  struct arm_prologue_cache *cache;

  /* Populate the cache lazily on first use; subsequent hook calls
     for this frame reuse it.  */
  if (*this_cache == NULL)
    *this_cache = arm_m_exception_cache (this_frame);
  cache = *this_cache;

  /* Our frame ID for a stub frame is the current SP and LR.  */
  *this_id = frame_id_build (cache->prev_sp,
			     get_frame_pc (this_frame));
}
3066
/* Implementation of function hook 'prev_register' in
   'struct frame_unwind' for the M-profile exception unwinder.
   Return the value PREV_REGNUM had in the frame that was interrupted
   by the exception.  */

static struct value *
arm_m_exception_prev_register (struct frame_info *this_frame,
			       void **this_cache,
			       int prev_regnum)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct arm_prologue_cache *cache;

  if (*this_cache == NULL)
    *this_cache = arm_m_exception_cache (this_frame);
  cache = *this_cache;

  /* The value was already reconstructed into PREV_SP.  */
  if (prev_regnum == ARM_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
				      cache->prev_sp);

  /* All other registers come from the hardware-saved exception frame
     recorded in CACHE->saved_regs.  */
  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
				       prev_regnum);
}
3090
/* Implementation of function hook 'sniffer' in
   'struct frame_unwind' for the M-profile exception unwinder.
   Return non-zero if THIS_FRAME looks like an exception frame.  */

static int
arm_m_exception_unwind_sniffer (const struct frame_unwind *self,
				struct frame_info *this_frame,
				void **this_prologue_cache)
{
  CORE_ADDR this_pc = get_frame_pc (this_frame);

  /* No need to check is_m; this sniffer is only registered for
     M-profile architectures.  */

  /* Exception frames return to one of these magic PCs.  Other values
     are not defined as of v7-M.  See details in "B1.5.8 Exception
     return behavior" in "ARMv7-M Architecture Reference Manual".  */
  if (this_pc == 0xfffffff1 || this_pc == 0xfffffff9
      || this_pc == 0xfffffffd)
    return 1;

  return 0;
}
3113
/* Frame unwinder for M-profile exceptions.  */

struct frame_unwind arm_m_exception_unwind =
{
  SIGTRAMP_FRAME,			/* type */
  default_frame_unwind_stop_reason,	/* stop_reason */
  arm_m_exception_this_id,		/* this_id */
  arm_m_exception_prev_register,	/* prev_register */
  NULL,					/* unwind_data */
  arm_m_exception_unwind_sniffer	/* sniffer */
};
3125
/* Implementation of the frame_base 'this_base' (and locals/args) hook
   for normal ARM frames: the base is the previous frame's SP minus
   the size of this frame, as computed by the prologue analyzer.  */

static CORE_ADDR
arm_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct arm_prologue_cache *cache;

  if (*this_cache == NULL)
    *this_cache = arm_make_prologue_cache (this_frame);
  cache = *this_cache;

  return cache->prev_sp - cache->framesize;
}
3137
/* Frame base handler for frames recognized by the prologue unwinder.
   The same address is used for the frame base, locals and args.  */
struct frame_base arm_normal_base = {
  &arm_prologue_unwind,		/* unwind */
  arm_normal_frame_base,	/* this_base */
  arm_normal_frame_base,	/* this_locals_base */
  arm_normal_frame_base		/* this_args_base */
};
3144
3145 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
3146 dummy frame. The frame ID's base needs to match the TOS value
3147 saved by save_dummy_frame_tos() and returned from
3148 arm_push_dummy_call, and the PC needs to match the dummy frame's
3149 breakpoint. */
3150
3151 static struct frame_id
3152 arm_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
3153 {
3154 return frame_id_build (get_frame_register_unsigned (this_frame,
3155 ARM_SP_REGNUM),
3156 get_frame_pc (this_frame));
3157 }
3158
3159 /* Given THIS_FRAME, find the previous frame's resume PC (which will
3160 be used to construct the previous frame's ID, after looking up the
3161 containing function). */
3162
3163 static CORE_ADDR
3164 arm_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
3165 {
3166 CORE_ADDR pc;
3167 pc = frame_unwind_register_unsigned (this_frame, ARM_PC_REGNUM);
3168 return arm_addr_bits_remove (gdbarch, pc);
3169 }
3170
3171 static CORE_ADDR
3172 arm_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
3173 {
3174 return frame_unwind_register_unsigned (this_frame, ARM_SP_REGNUM);
3175 }
3176
/* DWARF2 unwinder callback used for the PC and CPSR pseudo-columns:
   reconstruct REGNUM for the frame previous to THIS_FRAME from the
   unwound link register.  Only ARM_PC_REGNUM and ARM_PS_REGNUM are
   ever requested here (see arm_dwarf2_frame_init_reg below).  */

static struct value *
arm_dwarf2_prev_register (struct frame_info *this_frame, void **this_cache,
			  int regnum)
{
  struct gdbarch * gdbarch = get_frame_arch (this_frame);
  CORE_ADDR lr, cpsr;
  ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);

  switch (regnum)
    {
    case ARM_PC_REGNUM:
      /* The PC is normally copied from the return column, which
	 describes saves of LR.  However, that version may have an
	 extra bit set to indicate Thumb state.  The bit is not
	 part of the PC.  */
      lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, regnum,
					arm_addr_bits_remove (gdbarch, lr));

    case ARM_PS_REGNUM:
      /* Reconstruct the T bit; see arm_prologue_prev_register for details.  */
      cpsr = get_frame_register_unsigned (this_frame, regnum);
      lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
      /* The low bit of the unwound LR tells us the execution state the
	 caller will resume in: set for Thumb, clear for ARM.  */
      if (IS_THUMB_ADDR (lr))
	cpsr |= t_bit;
      else
	cpsr &= ~t_bit;
      return frame_unwind_got_constant (this_frame, regnum, cpsr);

    default:
      internal_error (__FILE__, __LINE__,
		      _("Unexpected register %d"), regnum);
    }
}
3211
3212 static void
3213 arm_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
3214 struct dwarf2_frame_state_reg *reg,
3215 struct frame_info *this_frame)
3216 {
3217 switch (regnum)
3218 {
3219 case ARM_PC_REGNUM:
3220 case ARM_PS_REGNUM:
3221 reg->how = DWARF2_FRAME_REG_FN;
3222 reg->loc.fn = arm_dwarf2_prev_register;
3223 break;
3224 case ARM_SP_REGNUM:
3225 reg->how = DWARF2_FRAME_REG_CFA;
3226 break;
3227 }
3228 }
3229
/* Return true if we are in the function's epilogue, i.e. after the
   instruction that destroyed the function's stack frame, for Thumb
   (including 32-bit Thumb-2) code at PC.  */

static int
thumb_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned int insn, insn2;
  int found_return = 0, found_stack_adjust = 0;
  CORE_ADDR func_start, func_end;
  CORE_ADDR scan_pc;
  gdb_byte buf[4];

  /* Without symbol bounds we cannot limit the scan; bail out.  */
  if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
    return 0;

  /* The epilogue is a sequence of instructions along the following lines:

    - add stack frame size to SP or FP
    - [if frame pointer used] restore SP from FP
    - restore registers from SP [may include PC]
    - a return-type instruction [if PC wasn't already restored]

    In a first pass, we scan forward from the current PC and verify the
    instructions we find as compatible with this sequence, ending in a
    return instruction.

    However, this is not sufficient to distinguish indirect function calls
    within a function from indirect tail calls in the epilogue in some cases.
    Therefore, if we didn't already find any SP-changing instruction during
    forward scan, we add a backward scanning heuristic to ensure we actually
    are in the epilogue.  */

  scan_pc = pc;
  while (scan_pc < func_end && !found_return)
    {
      /* Unreadable memory ends the scan without a verdict.  */
      if (target_read_memory (scan_pc, buf, 2))
	break;

      scan_pc += 2;
      insn = extract_unsigned_integer (buf, 2, byte_order_for_code);

      if ((insn & 0xff80) == 0x4700)  /* bx <Rm> */
	found_return = 1;
      else if (insn == 0x46f7)  /* mov pc, lr */
	found_return = 1;
      else if (thumb_instruction_restores_sp (insn))
	{
	  found_stack_adjust = 1;
	  if ((insn & 0xfe00) == 0xbd00)  /* pop <registers, PC> */
	    found_return = 1;
	}
      else if (thumb_insn_size (insn) == 4)  /* 32-bit Thumb-2 instruction */
	{
	  /* Fetch the second halfword of the 32-bit encoding.  */
	  if (target_read_memory (scan_pc, buf, 2))
	    break;

	  scan_pc += 2;
	  insn2 = extract_unsigned_integer (buf, 2, byte_order_for_code);

	  if (insn == 0xe8bd)  /* ldm.w sp!, <registers> */
	    {
	      found_stack_adjust = 1;
	      if (insn2 & 0x8000)  /* <registers> include PC.  */
		found_return = 1;
	    }
	  else if (insn == 0xf85d  /* ldr.w <Rt>, [sp], #4 */
		   && (insn2 & 0x0fff) == 0x0b04)
	    {
	      found_stack_adjust = 1;
	      if ((insn2 & 0xf000) == 0xf000)  /* <Rt> is PC.  */
		found_return = 1;
	    }
	  else if ((insn & 0xffbf) == 0xecbd  /* vldm sp!, <list> */
		   && (insn2 & 0x0e00) == 0x0a00)
	    found_stack_adjust = 1;
	  else
	    /* Anything else is not part of an epilogue sequence.  */
	    break;
	}
      else
	break;
    }

  if (!found_return)
    return 0;

  /* Since any instruction in the epilogue sequence, with the possible
     exception of return itself, updates the stack pointer, we need to
     scan backwards for at most one instruction.  Try either a 16-bit or
     a 32-bit instruction.  This is just a heuristic, so we do not worry
     too much about false positives.  */

  if (!found_stack_adjust)
    {
      if (pc - 4 < func_start)
	return 0;
      if (target_read_memory (pc - 4, buf, 4))
	return 0;

      /* Decode the 4 bytes before PC both as a trailing 16-bit insn
	 (INSN2) and as one 32-bit Thumb-2 insn (INSN:INSN2).  */
      insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
      insn2 = extract_unsigned_integer (buf + 2, 2, byte_order_for_code);

      if (thumb_instruction_restores_sp (insn2))
	found_stack_adjust = 1;
      else if (insn == 0xe8bd)  /* ldm.w sp!, <registers> */
	found_stack_adjust = 1;
      else if (insn == 0xf85d  /* ldr.w <Rt>, [sp], #4 */
	       && (insn2 & 0x0fff) == 0x0b04)
	found_stack_adjust = 1;
      else if ((insn & 0xffbf) == 0xecbd  /* vldm sp!, <list> */
	       && (insn2 & 0x0e00) == 0x0a00)
	found_stack_adjust = 1;
    }

  return found_stack_adjust;
}
3346
3347 /* Return true if we are in the function's epilogue, i.e. after the
3348 instruction that destroyed the function's stack frame. */
3349
3350 static int
3351 arm_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
3352 {
3353 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3354 unsigned int insn;
3355 int found_return, found_stack_adjust;
3356 CORE_ADDR func_start, func_end;
3357
3358 if (arm_pc_is_thumb (gdbarch, pc))
3359 return thumb_in_function_epilogue_p (gdbarch, pc);
3360
3361 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
3362 return 0;
3363
3364 /* We are in the epilogue if the previous instruction was a stack
3365 adjustment and the next instruction is a possible return (bx, mov
3366 pc, or pop). We could have to scan backwards to find the stack
3367 adjustment, or forwards to find the return, but this is a decent
3368 approximation. First scan forwards. */
3369
3370 found_return = 0;
3371 insn = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
3372 if (bits (insn, 28, 31) != INST_NV)
3373 {
3374 if ((insn & 0x0ffffff0) == 0x012fff10)
3375 /* BX. */
3376 found_return = 1;
3377 else if ((insn & 0x0ffffff0) == 0x01a0f000)
3378 /* MOV PC. */
3379 found_return = 1;
3380 else if ((insn & 0x0fff0000) == 0x08bd0000
3381 && (insn & 0x0000c000) != 0)
3382 /* POP (LDMIA), including PC or LR. */
3383 found_return = 1;
3384 }
3385
3386 if (!found_return)
3387 return 0;
3388
3389 /* Scan backwards. This is just a heuristic, so do not worry about
3390 false positives from mode changes. */
3391
3392 if (pc < func_start + 4)
3393 return 0;
3394
3395 found_stack_adjust = 0;
3396 insn = read_memory_unsigned_integer (pc - 4, 4, byte_order_for_code);
3397 if (bits (insn, 28, 31) != INST_NV)
3398 {
3399 if ((insn & 0x0df0f000) == 0x0080d000)
3400 /* ADD SP (register or immediate). */
3401 found_stack_adjust = 1;
3402 else if ((insn & 0x0df0f000) == 0x0040d000)
3403 /* SUB SP (register or immediate). */
3404 found_stack_adjust = 1;
3405 else if ((insn & 0x0ffffff0) == 0x01a0d000)
3406 /* MOV SP. */
3407 found_stack_adjust = 1;
3408 else if ((insn & 0x0fff0000) == 0x08bd0000)
3409 /* POP (LDMIA). */
3410 found_stack_adjust = 1;
3411 else if ((insn & 0x0fff0000) == 0x049d0000)
3412 /* POP of a single register. */
3413 found_stack_adjust = 1;
3414 }
3415
3416 if (found_stack_adjust)
3417 return 1;
3418
3419 return 0;
3420 }
3421
3422
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

struct stack_item
{
  int len;			/* Size of DATA in bytes.  */
  struct stack_item *prev;	/* Next item down the stack, or NULL.  */
  void *data;			/* Heap copy of the pushed bytes (owned).  */
};
3432
3433 static struct stack_item *
3434 push_stack_item (struct stack_item *prev, const void *contents, int len)
3435 {
3436 struct stack_item *si;
3437 si = xmalloc (sizeof (struct stack_item));
3438 si->data = xmalloc (len);
3439 si->len = len;
3440 si->prev = prev;
3441 memcpy (si->data, contents, len);
3442 return si;
3443 }
3444
3445 static struct stack_item *
3446 pop_stack_item (struct stack_item *si)
3447 {
3448 struct stack_item *dead = si;
3449 si = si->prev;
3450 xfree (dead->data);
3451 xfree (dead);
3452 return si;
3453 }
3454
3455
3456 /* Return the alignment (in bytes) of the given type. */
3457
3458 static int
3459 arm_type_align (struct type *t)
3460 {
3461 int n;
3462 int align;
3463 int falign;
3464
3465 t = check_typedef (t);
3466 switch (TYPE_CODE (t))
3467 {
3468 default:
3469 /* Should never happen. */
3470 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
3471 return 4;
3472
3473 case TYPE_CODE_PTR:
3474 case TYPE_CODE_ENUM:
3475 case TYPE_CODE_INT:
3476 case TYPE_CODE_FLT:
3477 case TYPE_CODE_SET:
3478 case TYPE_CODE_RANGE:
3479 case TYPE_CODE_REF:
3480 case TYPE_CODE_CHAR:
3481 case TYPE_CODE_BOOL:
3482 return TYPE_LENGTH (t);
3483
3484 case TYPE_CODE_ARRAY:
3485 case TYPE_CODE_COMPLEX:
3486 /* TODO: What about vector types? */
3487 return arm_type_align (TYPE_TARGET_TYPE (t));
3488
3489 case TYPE_CODE_STRUCT:
3490 case TYPE_CODE_UNION:
3491 align = 1;
3492 for (n = 0; n < TYPE_NFIELDS (t); n++)
3493 {
3494 falign = arm_type_align (TYPE_FIELD_TYPE (t, n));
3495 if (falign > align)
3496 align = falign;
3497 }
3498 return align;
3499 }
3500 }
3501
/* Possible base types for a candidate for passing and returning in
   VFP registers.  */

enum arm_vfp_cprc_base_type
{
  VFP_CPRC_UNKNOWN,	/* Not yet classified.  */
  VFP_CPRC_SINGLE,	/* 32-bit float; passed in 's' registers.  */
  VFP_CPRC_DOUBLE,	/* 64-bit float; passed in 'd' registers.  */
  VFP_CPRC_VEC64,	/* 64-bit vector; passed in 'd' registers.  */
  VFP_CPRC_VEC128	/* 128-bit vector; passed in 'q' registers.  */
};
3513
3514 /* The length of one element of base type B. */
3515
3516 static unsigned
3517 arm_vfp_cprc_unit_length (enum arm_vfp_cprc_base_type b)
3518 {
3519 switch (b)
3520 {
3521 case VFP_CPRC_SINGLE:
3522 return 4;
3523 case VFP_CPRC_DOUBLE:
3524 return 8;
3525 case VFP_CPRC_VEC64:
3526 return 8;
3527 case VFP_CPRC_VEC128:
3528 return 16;
3529 default:
3530 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
3531 (int) b);
3532 }
3533 }
3534
3535 /* The character ('s', 'd' or 'q') for the type of VFP register used
3536 for passing base type B. */
3537
3538 static int
3539 arm_vfp_cprc_reg_char (enum arm_vfp_cprc_base_type b)
3540 {
3541 switch (b)
3542 {
3543 case VFP_CPRC_SINGLE:
3544 return 's';
3545 case VFP_CPRC_DOUBLE:
3546 return 'd';
3547 case VFP_CPRC_VEC64:
3548 return 'd';
3549 case VFP_CPRC_VEC128:
3550 return 'q';
3551 default:
3552 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
3553 (int) b);
3554 }
3555 }
3556
/* Determine whether T may be part of a candidate for passing and
   returning in VFP registers, ignoring the limit on the total number
   of components.  If *BASE_TYPE is VFP_CPRC_UNKNOWN, set it to the
   classification of the first valid component found; if it is not
   VFP_CPRC_UNKNOWN, all components must have the same classification
   as *BASE_TYPE.  If it is found that T contains a type not permitted
   for passing and returning in VFP registers, a type differently
   classified from *BASE_TYPE, or two types differently classified
   from each other, return -1, otherwise return the total number of
   base-type elements found (possibly 0 in an empty structure or
   array).  Vectors and complex types are not currently supported,
   matching the generic AAPCS support.  */

static int
arm_vfp_cprc_sub_candidate (struct type *t,
			    enum arm_vfp_cprc_base_type *base_type)
{
  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    case TYPE_CODE_FLT:
      /* Only single- and double-precision floats qualify; long double
	 and other widths disqualify the whole candidate.  */
      switch (TYPE_LENGTH (t))
	{
	case 4:
	  if (*base_type == VFP_CPRC_UNKNOWN)
	    *base_type = VFP_CPRC_SINGLE;
	  else if (*base_type != VFP_CPRC_SINGLE)
	    return -1;
	  return 1;

	case 8:
	  if (*base_type == VFP_CPRC_UNKNOWN)
	    *base_type = VFP_CPRC_DOUBLE;
	  else if (*base_type != VFP_CPRC_DOUBLE)
	    return -1;
	  return 1;

	default:
	  return -1;
	}
      break;

    case TYPE_CODE_ARRAY:
      {
	int count;
	unsigned unitlen;
	/* Classify one element, then scale by the array's byte length
	   rather than its element count.  */
	count = arm_vfp_cprc_sub_candidate (TYPE_TARGET_TYPE (t), base_type);
	if (count == -1)
	  return -1;
	if (TYPE_LENGTH (t) == 0)
	  {
	    gdb_assert (count == 0);
	    return 0;
	  }
	else if (count == 0)
	  return -1;
	unitlen = arm_vfp_cprc_unit_length (*base_type);
	gdb_assert ((TYPE_LENGTH (t) % unitlen) == 0);
	return TYPE_LENGTH (t) / unitlen;
      }
      break;

    case TYPE_CODE_STRUCT:
      {
	/* Members contribute additively; any padding makes the total
	   length disagree with UNITLEN * COUNT and disqualifies T.  */
	int count = 0;
	unsigned unitlen;
	int i;
	for (i = 0; i < TYPE_NFIELDS (t); i++)
	  {
	    int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
							base_type);
	    if (sub_count == -1)
	      return -1;
	    count += sub_count;
	  }
	if (TYPE_LENGTH (t) == 0)
	  {
	    gdb_assert (count == 0);
	    return 0;
	  }
	else if (count == 0)
	  return -1;
	unitlen = arm_vfp_cprc_unit_length (*base_type);
	if (TYPE_LENGTH (t) != unitlen * count)
	  return -1;
	return count;
      }

    case TYPE_CODE_UNION:
      {
	/* Union members overlap, so the candidate count is the largest
	   member's count, not the sum.  */
	int count = 0;
	unsigned unitlen;
	int i;
	for (i = 0; i < TYPE_NFIELDS (t); i++)
	  {
	    int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
							base_type);
	    if (sub_count == -1)
	      return -1;
	    count = (count > sub_count ? count : sub_count);
	  }
	if (TYPE_LENGTH (t) == 0)
	  {
	    gdb_assert (count == 0);
	    return 0;
	  }
	else if (count == 0)
	  return -1;
	unitlen = arm_vfp_cprc_unit_length (*base_type);
	if (TYPE_LENGTH (t) != unitlen * count)
	  return -1;
	return count;
      }

    default:
      break;
    }

  return -1;
}
3677
3678 /* Determine whether T is a VFP co-processor register candidate (CPRC)
3679 if passed to or returned from a non-variadic function with the VFP
3680 ABI in effect. Return 1 if it is, 0 otherwise. If it is, set
3681 *BASE_TYPE to the base type for T and *COUNT to the number of
3682 elements of that base type before returning. */
3683
3684 static int
3685 arm_vfp_call_candidate (struct type *t, enum arm_vfp_cprc_base_type *base_type,
3686 int *count)
3687 {
3688 enum arm_vfp_cprc_base_type b = VFP_CPRC_UNKNOWN;
3689 int c = arm_vfp_cprc_sub_candidate (t, &b);
3690 if (c <= 0 || c > 4)
3691 return 0;
3692 *base_type = b;
3693 *count = c;
3694 return 1;
3695 }
3696
3697 /* Return 1 if the VFP ABI should be used for passing arguments to and
3698 returning values from a function of type FUNC_TYPE, 0
3699 otherwise. */
3700
3701 static int
3702 arm_vfp_abi_for_function (struct gdbarch *gdbarch, struct type *func_type)
3703 {
3704 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3705 /* Variadic functions always use the base ABI. Assume that functions
3706 without debug info are not variadic. */
3707 if (func_type && TYPE_VARARGS (check_typedef (func_type)))
3708 return 0;
3709 /* The VFP ABI is only supported as a variant of AAPCS. */
3710 if (tdep->arm_abi != ARM_ABI_AAPCS)
3711 return 0;
3712 return gdbarch_tdep (gdbarch)->fp_model == ARM_FLOAT_VFP;
3713 }
3714
/* We currently only support passing parameters in integer registers, which
   conforms with GCC's default model, and VFP argument passing following
   the VFP variant of AAPCS.  Several other variants exist and
   we should probably support some of them based on the selected ABI.

   Marshal NARGS arguments from ARGS into registers and onto the stack
   at SP, set LR to BP_ADDR (the return breakpoint), and return the
   final, aligned SP.  */

static CORE_ADDR
arm_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
		     struct regcache *regcache, CORE_ADDR bp_addr, int nargs,
		     struct value **args, CORE_ADDR sp, int struct_return,
		     CORE_ADDR struct_addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int argnum;
  int argreg;
  int nstack;
  struct stack_item *si = NULL;
  int use_vfp_abi;
  struct type *ftype;
  /* Bitmask of free single-precision VFP registers s0..s15.  */
  unsigned vfp_regs_free = (1 << 16) - 1;

  /* Determine the type of this function and whether the VFP ABI
     applies.  */
  ftype = check_typedef (value_type (function));
  if (TYPE_CODE (ftype) == TYPE_CODE_PTR)
    ftype = check_typedef (TYPE_TARGET_TYPE (ftype));
  use_vfp_abi = arm_vfp_abi_for_function (gdbarch, ftype);

  /* Set the return address.  For the ARM, the return breakpoint is
     always at BP_ADDR.  */
  if (arm_pc_is_thumb (gdbarch, bp_addr))
    bp_addr |= 1;
  regcache_cooked_write_unsigned (regcache, ARM_LR_REGNUM, bp_addr);

  /* Walk through the list of args and determine how large a temporary
     stack is required.  Need to take care here as structs may be
     passed on the stack, and we have to push them.  */
  nstack = 0;

  argreg = ARM_A1_REGNUM;
  /* NOTE(review): NSTACK was already zeroed above; this assignment is
     redundant but harmless.  */
  nstack = 0;

  /* The struct_return pointer occupies the first parameter
     passing register.  */
  if (struct_return)
    {
      if (arm_debug)
	fprintf_unfiltered (gdb_stdlog, "struct return in %s = %s\n",
			    gdbarch_register_name (gdbarch, argreg),
			    paddress (gdbarch, struct_addr));
      regcache_cooked_write_unsigned (regcache, argreg, struct_addr);
      argreg++;
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      int len;
      struct type *arg_type;
      struct type *target_type;
      enum type_code typecode;
      const bfd_byte *val;
      int align;
      enum arm_vfp_cprc_base_type vfp_base_type;
      int vfp_base_count;
      int may_use_core_reg = 1;

      arg_type = check_typedef (value_type (args[argnum]));
      len = TYPE_LENGTH (arg_type);
      target_type = TYPE_TARGET_TYPE (arg_type);
      typecode = TYPE_CODE (arg_type);
      val = value_contents (args[argnum]);

      align = arm_type_align (arg_type);
      /* Round alignment up to a whole number of words.  */
      align = (align + INT_REGISTER_SIZE - 1) & ~(INT_REGISTER_SIZE - 1);
      /* Different ABIs have different maximum alignments.  */
      if (gdbarch_tdep (gdbarch)->arm_abi == ARM_ABI_APCS)
	{
	  /* The APCS ABI only requires word alignment.  */
	  align = INT_REGISTER_SIZE;
	}
      else
	{
	  /* The AAPCS requires at most doubleword alignment.  */
	  if (align > INT_REGISTER_SIZE * 2)
	    align = INT_REGISTER_SIZE * 2;
	}

      if (use_vfp_abi
	  && arm_vfp_call_candidate (arg_type, &vfp_base_type,
				     &vfp_base_count))
	{
	  int regno;
	  int unit_length;
	  int shift;
	  unsigned mask;

	  /* Because this is a CPRC it cannot go in a core register or
	     cause a core register to be skipped for alignment.
	     Either it goes in VFP registers and the rest of this loop
	     iteration is skipped for this argument, or it goes on the
	     stack (and the stack alignment code is correct for this
	     case).  */
	  may_use_core_reg = 0;

	  /* Find a contiguous run of free VFP registers, stepping by
	     the element size expressed in s-register units.  */
	  unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
	  shift = unit_length / 4;
	  mask = (1 << (shift * vfp_base_count)) - 1;
	  for (regno = 0; regno < 16; regno += shift)
	    if (((vfp_regs_free >> regno) & mask) == mask)
	      break;

	  if (regno < 16)
	    {
	      int reg_char;
	      int reg_scaled;
	      int i;

	      vfp_regs_free &= ~(mask << regno);
	      reg_scaled = regno / shift;
	      reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
	      for (i = 0; i < vfp_base_count; i++)
		{
		  char name_buf[4];
		  int regnum;
		  if (reg_char == 'q')
		    arm_neon_quad_write (gdbarch, regcache, reg_scaled + i,
					 val + i * unit_length);
		  else
		    {
		      xsnprintf (name_buf, sizeof (name_buf), "%c%d",
				 reg_char, reg_scaled + i);
		      regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
							    strlen (name_buf));
		      regcache_cooked_write (regcache, regnum,
					     val + i * unit_length);
		    }
		}
	      continue;
	    }
	  else
	    {
	      /* This CPRC could not go in VFP registers, so all VFP
		 registers are now marked as used.  */
	      vfp_regs_free = 0;
	    }
	}

      /* Push stack padding for doubleword alignment.  VAL is used only
	 as a source of filler bytes here; the padding content does not
	 matter.  */
      if (nstack & (align - 1))
	{
	  si = push_stack_item (si, val, INT_REGISTER_SIZE);
	  nstack += INT_REGISTER_SIZE;
	}

      /* Doubleword aligned quantities must go in even register pairs.  */
      if (may_use_core_reg
	  && argreg <= ARM_LAST_ARG_REGNUM
	  && align > INT_REGISTER_SIZE
	  && argreg & 1)
	argreg++;

      /* If the argument is a pointer to a function, and it is a
	 Thumb function, create a LOCAL copy of the value and set
	 the THUMB bit in it.  */
      if (TYPE_CODE_PTR == typecode
	  && target_type != NULL
	  && TYPE_CODE_FUNC == TYPE_CODE (check_typedef (target_type)))
	{
	  CORE_ADDR regval = extract_unsigned_integer (val, len, byte_order);
	  if (arm_pc_is_thumb (gdbarch, regval))
	    {
	      bfd_byte *copy = alloca (len);
	      store_unsigned_integer (copy, len, byte_order,
				      MAKE_THUMB_ADDR (regval));
	      val = copy;
	    }
	}

      /* Copy the argument to general registers or the stack in
	 register-sized pieces.  Large arguments are split between
	 registers and stack.  */
      while (len > 0)
	{
	  int partial_len = len < INT_REGISTER_SIZE ? len : INT_REGISTER_SIZE;

	  if (may_use_core_reg && argreg <= ARM_LAST_ARG_REGNUM)
	    {
	      /* The argument is being passed in a general purpose
		 register.  */
	      CORE_ADDR regval
		= extract_unsigned_integer (val, partial_len, byte_order);
	      if (byte_order == BFD_ENDIAN_BIG)
		regval <<= (INT_REGISTER_SIZE - partial_len) * 8;
	      if (arm_debug)
		fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
				    argnum,
				    gdbarch_register_name
				      (gdbarch, argreg),
				    phex (regval, INT_REGISTER_SIZE));
	      regcache_cooked_write_unsigned (regcache, argreg, regval);
	      argreg++;
	    }
	  else
	    {
	      /* Push the arguments onto the stack.  */
	      if (arm_debug)
		fprintf_unfiltered (gdb_stdlog, "arg %d @ sp + %d\n",
				    argnum, nstack);
	      si = push_stack_item (si, val, INT_REGISTER_SIZE);
	      nstack += INT_REGISTER_SIZE;
	    }

	  len -= partial_len;
	  val += partial_len;
	}
    }
  /* If we have an odd number of words to push, then decrement the stack
     by one word now, so first stack argument will be dword aligned.  */
  if (nstack & 4)
    sp -= 4;

  /* Drain the FILO: items were pushed in argument order, so writing
     while decrementing SP lays them out bottom-up.  */
  while (si)
    {
      sp -= si->len;
      write_memory (sp, si->data, si->len);
      si = pop_stack_item (si);
    }

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, ARM_SP_REGNUM, sp);

  return sp;
}
3948
3949
3950 /* Always align the frame to an 8-byte boundary. This is required on
3951 some platforms and harmless on the rest. */
3952
3953 static CORE_ADDR
3954 arm_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
3955 {
3956 /* Align the stack to eight bytes. */
3957 return sp & ~ (CORE_ADDR) 7;
3958 }
3959
/* Print the FPA exception-flag names corresponding to the low five
   bits of FLAGS to FILE, followed by a newline.  */

static void
print_fpu_flags (struct ui_file *file, int flags)
{
  /* One name per flag bit, in bit order 0..4.  */
  static const char *const flag_names[]
    = { "IVO ", "DVZ ", "OFL ", "UFL ", "INX " };
  int bit;

  for (bit = 0; bit < 5; bit++)
    if (flags & (1 << bit))
      fputs_filtered (flag_names[bit], file);
  fputc_filtered ('\n', file);
}
3975
3976 /* Print interesting information about the floating point processor
3977 (if present) or emulator. */
3978 static void
3979 arm_print_float_info (struct gdbarch *gdbarch, struct ui_file *file,
3980 struct frame_info *frame, const char *args)
3981 {
3982 unsigned long status = get_frame_register_unsigned (frame, ARM_FPS_REGNUM);
3983 int type;
3984
3985 type = (status >> 24) & 127;
3986 if (status & (1 << 31))
3987 fprintf_filtered (file, _("Hardware FPU type %d\n"), type);
3988 else
3989 fprintf_filtered (file, _("Software FPU type %d\n"), type);
3990 /* i18n: [floating point unit] mask */
3991 fputs_filtered (_("mask: "), file);
3992 print_fpu_flags (file, status >> 16);
3993 /* i18n: [floating point unit] flags */
3994 fputs_filtered (_("flags: "), file);
3995 print_fpu_flags (file, status);
3996 }
3997
3998 /* Construct the ARM extended floating point type. */
3999 static struct type *
4000 arm_ext_type (struct gdbarch *gdbarch)
4001 {
4002 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
4003
4004 if (!tdep->arm_ext_type)
4005 tdep->arm_ext_type
4006 = arch_float_type (gdbarch, -1, "builtin_type_arm_ext",
4007 floatformats_arm_ext);
4008
4009 return tdep->arm_ext_type;
4010 }
4011
/* Construct (and cache in TDEP) the union type used to display a
   64-bit NEON 'd' register: overlapping u8/u16/u32/u64/f32/f64
   views of the same 8 bytes.  */

static struct type *
arm_neon_double_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->neon_double_type == NULL)
    {
      struct type *t, *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_d",
			       TYPE_CODE_UNION);
      /* Field order determines display order in "info registers".  */
      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u8", init_vector_type (elem, 8));
      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u16", init_vector_type (elem, 4));
      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u32", init_vector_type (elem, 2));
      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u64", elem);
      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f32", init_vector_type (elem, 2));
      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f64", elem);

      TYPE_VECTOR (t) = 1;
      TYPE_NAME (t) = "neon_d";
      tdep->neon_double_type = t;
    }

  return tdep->neon_double_type;
}
4043
/* FIXME: The vector types are not correctly ordered on big-endian
   targets.  Just as s0 is the low bits of d0, d0[0] is also the low
   bits of d0 - regardless of what unit size is being held in d0.  So
   the offset of the first uint8 in d0 is 7, but the offset of the
   first float is 4.  This code works as-is for little-endian
   targets.  */

/* Construct (and cache in TDEP) the union type used to display a
   128-bit NEON 'q' register; see arm_neon_double_type for the
   analogous 64-bit layout.  */

static struct type *
arm_neon_quad_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->neon_quad_type == NULL)
    {
      struct type *t, *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_q",
			       TYPE_CODE_UNION);
      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u8", init_vector_type (elem, 16));
      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u16", init_vector_type (elem, 8));
      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u32", init_vector_type (elem, 4));
      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u64", init_vector_type (elem, 2));
      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f32", init_vector_type (elem, 4));
      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f64", init_vector_type (elem, 2));

      TYPE_VECTOR (t) = 1;
      TYPE_NAME (t) = "neon_q";
      tdep->neon_quad_type = t;
    }

  return tdep->neon_quad_type;
}
4082
/* Return the GDB type object for the "standard" data type of data in
   register N.  */

static struct type *
arm_register_type (struct gdbarch *gdbarch, int regnum)
{
  int num_regs = gdbarch_num_regs (gdbarch);

  /* Pseudo registers: 32 single-precision VFP views first, then 16
     quad NEON views, laid out immediately after the raw registers.  */
  if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
      && regnum >= num_regs && regnum < num_regs + 32)
    return builtin_type (gdbarch)->builtin_float;

  if (gdbarch_tdep (gdbarch)->have_neon_pseudos
      && regnum >= num_regs + 32 && regnum < num_regs + 32 + 16)
    return arm_neon_quad_type (gdbarch);

  /* If the target description has register information, we are only
     in this function so that we can override the types of
     double-precision registers for NEON.  */
  if (tdesc_has_registers (gdbarch_target_desc (gdbarch)))
    {
      struct type *t = tdesc_register_type (gdbarch, regnum);

      if (regnum >= ARM_D0_REGNUM && regnum < ARM_D0_REGNUM + 32
	  && TYPE_CODE (t) == TYPE_CODE_FLT
	  && gdbarch_tdep (gdbarch)->have_neon)
	return arm_neon_double_type (gdbarch);
      else
	return t;
    }

  /* No target description: fall back to fixed assignments.  */
  if (regnum >= ARM_F0_REGNUM && regnum < ARM_F0_REGNUM + NUM_FREGS)
    {
      if (!gdbarch_tdep (gdbarch)->have_fpa_registers)
	return builtin_type (gdbarch)->builtin_void;

      return arm_ext_type (gdbarch);
    }
  else if (regnum == ARM_SP_REGNUM)
    return builtin_type (gdbarch)->builtin_data_ptr;
  else if (regnum == ARM_PC_REGNUM)
    return builtin_type (gdbarch)->builtin_func_ptr;
  else if (regnum >= ARRAY_SIZE (arm_register_names))
    /* These registers are only supported on targets which supply
       an XML description.  */
    return builtin_type (gdbarch)->builtin_int0;
  else
    return builtin_type (gdbarch)->builtin_uint32;
}
4132
4133 /* Map a DWARF register REGNUM onto the appropriate GDB register
4134 number. */
4135
4136 static int
4137 arm_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
4138 {
4139 /* Core integer regs. */
4140 if (reg >= 0 && reg <= 15)
4141 return reg;
4142
4143 /* Legacy FPA encoding. These were once used in a way which
4144 overlapped with VFP register numbering, so their use is
4145 discouraged, but GDB doesn't support the ARM toolchain
4146 which used them for VFP. */
4147 if (reg >= 16 && reg <= 23)
4148 return ARM_F0_REGNUM + reg - 16;
4149
4150 /* New assignments for the FPA registers. */
4151 if (reg >= 96 && reg <= 103)
4152 return ARM_F0_REGNUM + reg - 96;
4153
4154 /* WMMX register assignments. */
4155 if (reg >= 104 && reg <= 111)
4156 return ARM_WCGR0_REGNUM + reg - 104;
4157
4158 if (reg >= 112 && reg <= 127)
4159 return ARM_WR0_REGNUM + reg - 112;
4160
4161 if (reg >= 192 && reg <= 199)
4162 return ARM_WC0_REGNUM + reg - 192;
4163
4164 /* VFP v2 registers. A double precision value is actually
4165 in d1 rather than s2, but the ABI only defines numbering
4166 for the single precision registers. This will "just work"
4167 in GDB for little endian targets (we'll read eight bytes,
4168 starting in s0 and then progressing to s1), but will be
4169 reversed on big endian targets with VFP. This won't
4170 be a problem for the new Neon quad registers; you're supposed
4171 to use DW_OP_piece for those. */
4172 if (reg >= 64 && reg <= 95)
4173 {
4174 char name_buf[4];
4175
4176 xsnprintf (name_buf, sizeof (name_buf), "s%d", reg - 64);
4177 return user_reg_map_name_to_regnum (gdbarch, name_buf,
4178 strlen (name_buf));
4179 }
4180
4181 /* VFP v3 / Neon registers. This range is also used for VFP v2
4182 registers, except that it now describes d0 instead of s0. */
4183 if (reg >= 256 && reg <= 287)
4184 {
4185 char name_buf[4];
4186
4187 xsnprintf (name_buf, sizeof (name_buf), "d%d", reg - 256);
4188 return user_reg_map_name_to_regnum (gdbarch, name_buf,
4189 strlen (name_buf));
4190 }
4191
4192 return -1;
4193 }
4194
4195 /* Map GDB internal REGNUM onto the Arm simulator register numbers. */
4196 static int
4197 arm_register_sim_regno (struct gdbarch *gdbarch, int regnum)
4198 {
4199 int reg = regnum;
4200 gdb_assert (reg >= 0 && reg < gdbarch_num_regs (gdbarch));
4201
4202 if (regnum >= ARM_WR0_REGNUM && regnum <= ARM_WR15_REGNUM)
4203 return regnum - ARM_WR0_REGNUM + SIM_ARM_IWMMXT_COP0R0_REGNUM;
4204
4205 if (regnum >= ARM_WC0_REGNUM && regnum <= ARM_WC7_REGNUM)
4206 return regnum - ARM_WC0_REGNUM + SIM_ARM_IWMMXT_COP1R0_REGNUM;
4207
4208 if (regnum >= ARM_WCGR0_REGNUM && regnum <= ARM_WCGR7_REGNUM)
4209 return regnum - ARM_WCGR0_REGNUM + SIM_ARM_IWMMXT_COP1R8_REGNUM;
4210
4211 if (reg < NUM_GREGS)
4212 return SIM_ARM_R0_REGNUM + reg;
4213 reg -= NUM_GREGS;
4214
4215 if (reg < NUM_FREGS)
4216 return SIM_ARM_FP0_REGNUM + reg;
4217 reg -= NUM_FREGS;
4218
4219 if (reg < NUM_SREGS)
4220 return SIM_ARM_FPS_REGNUM + reg;
4221 reg -= NUM_SREGS;
4222
4223 internal_error (__FILE__, __LINE__, _("Bad REGNUM %d"), regnum);
4224 }
4225
/* NOTE: cagney/2001-08-20: Both convert_from_extended() and
   convert_to_extended() use floatformat_arm_ext_littlebyte_bigword.
   It is thought that this is the floating-point register format on
   little-endian systems.  */
4230
4231 static void
4232 convert_from_extended (const struct floatformat *fmt, const void *ptr,
4233 void *dbl, int endianess)
4234 {
4235 DOUBLEST d;
4236
4237 if (endianess == BFD_ENDIAN_BIG)
4238 floatformat_to_doublest (&floatformat_arm_ext_big, ptr, &d);
4239 else
4240 floatformat_to_doublest (&floatformat_arm_ext_littlebyte_bigword,
4241 ptr, &d);
4242 floatformat_from_doublest (fmt, &d, dbl);
4243 }
4244
4245 static void
4246 convert_to_extended (const struct floatformat *fmt, void *dbl, const void *ptr,
4247 int endianess)
4248 {
4249 DOUBLEST d;
4250
4251 floatformat_to_doublest (fmt, ptr, &d);
4252 if (endianess == BFD_ENDIAN_BIG)
4253 floatformat_from_doublest (&floatformat_arm_ext_big, &d, dbl);
4254 else
4255 floatformat_from_doublest (&floatformat_arm_ext_littlebyte_bigword,
4256 &d, dbl);
4257 }
4258
4259 static int
4260 condition_true (unsigned long cond, unsigned long status_reg)
4261 {
4262 if (cond == INST_AL || cond == INST_NV)
4263 return 1;
4264
4265 switch (cond)
4266 {
4267 case INST_EQ:
4268 return ((status_reg & FLAG_Z) != 0);
4269 case INST_NE:
4270 return ((status_reg & FLAG_Z) == 0);
4271 case INST_CS:
4272 return ((status_reg & FLAG_C) != 0);
4273 case INST_CC:
4274 return ((status_reg & FLAG_C) == 0);
4275 case INST_MI:
4276 return ((status_reg & FLAG_N) != 0);
4277 case INST_PL:
4278 return ((status_reg & FLAG_N) == 0);
4279 case INST_VS:
4280 return ((status_reg & FLAG_V) != 0);
4281 case INST_VC:
4282 return ((status_reg & FLAG_V) == 0);
4283 case INST_HI:
4284 return ((status_reg & (FLAG_C | FLAG_Z)) == FLAG_C);
4285 case INST_LS:
4286 return ((status_reg & (FLAG_C | FLAG_Z)) != FLAG_C);
4287 case INST_GE:
4288 return (((status_reg & FLAG_N) == 0) == ((status_reg & FLAG_V) == 0));
4289 case INST_LT:
4290 return (((status_reg & FLAG_N) == 0) != ((status_reg & FLAG_V) == 0));
4291 case INST_GT:
4292 return (((status_reg & FLAG_Z) == 0)
4293 && (((status_reg & FLAG_N) == 0)
4294 == ((status_reg & FLAG_V) == 0)));
4295 case INST_LE:
4296 return (((status_reg & FLAG_Z) != 0)
4297 || (((status_reg & FLAG_N) == 0)
4298 != ((status_reg & FLAG_V) == 0)));
4299 }
4300 return 1;
4301 }
4302
/* Compute the value of the shifted-register operand (ARM addressing
   mode 1) of instruction INST, reading registers from FRAME.  CARRY
   is the current C flag, consulted only for RRX.  PC_VAL is the raw
   PC value; when the PC appears as Rm or Rs the architectural
   pipeline offset (+8, or +12 for register-specified shifts) is
   applied.  STATUS_REG is not used by this function.  */

static unsigned long
shifted_reg_val (struct frame_info *frame, unsigned long inst, int carry,
		 unsigned long pc_val, unsigned long status_reg)
{
  unsigned long res, shift;
  int rm = bits (inst, 0, 3);
  unsigned long shifttype = bits (inst, 5, 6);

  if (bit (inst, 4))
    {
      /* Register-specified shift: amount is the bottom byte of Rs;
	 reading the PC here yields PC + 8.  */
      int rs = bits (inst, 8, 11);
      shift = (rs == 15 ? pc_val + 8
	       : get_frame_register_unsigned (frame, rs)) & 0xFF;
    }
  else
    /* Immediate shift amount from bits 7-11.  */
    shift = bits (inst, 7, 11);

  /* Rm as the PC reads as PC + 12 for register-specified shifts,
     PC + 8 otherwise.  */
  res = (rm == ARM_PC_REGNUM
	 ? (pc_val + (bit (inst, 4) ? 12 : 8))
	 : get_frame_register_unsigned (frame, rm));

  switch (shifttype)
    {
    case 0:			/* LSL */
      res = shift >= 32 ? 0 : res << shift;
      break;

    case 1:			/* LSR */
      res = shift >= 32 ? 0 : res >> shift;
      break;

    case 2:			/* ASR */
      /* Shifts of 32 or more replicate the sign bit everywhere.  */
      if (shift >= 32)
	shift = 31;
      res = ((res & 0x80000000L)
	     ? ~((~res) >> shift) : res >> shift);
      break;

    case 3:			/* ROR/RRX */
      shift &= 31;
      if (shift == 0)
	/* ROR #0 encodes RRX: rotate right one bit through carry.  */
	res = (res >> 1) | (carry ? 0x80000000L : 0);
      else
	res = (res >> shift) | (res << (32 - shift));
      break;
    }

  /* Truncate to 32 bits in case unsigned long is wider.  */
  return res & 0xffffffff;
}
4352
/* Return the number of set bits in VAL.  */

static int
bitcount (unsigned long val)
{
  int count = 0;

  /* Kernighan's trick: each iteration clears the lowest set bit.  */
  while (val != 0)
    {
      val &= val - 1;
      count++;
    }

  return count;
}
4363
/* Return the size in bytes (2 or 4) of the complete Thumb instruction
   whose first halfword is INST1.  */

static int
thumb_insn_size (unsigned short inst1)
{
  /* 32-bit encodings are those whose first halfword has the top
     three bits set with a non-zero op field in bits 11-12.  */
  return ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0) ? 4 : 2;
}
4375
/* Advance the IT-block state ITSTATE past one instruction: shift the
   condition mask left by one, keeping the base condition in bits
   7-5.  Returns 0 once the IT block is exhausted.  */

static int
thumb_advance_itstate (unsigned int itstate)
{
  unsigned int base_cond = itstate & 0xe0;
  unsigned int mask = (itstate << 1) & 0x1f;

  /* When the low four mask bits run out, the IT block is over.  */
  if ((mask & 0x0f) == 0)
    return 0;

  return base_cond | mask;
}
4389
/* Find the next PC after the current instruction executes.  In some
   cases we can not statically determine the answer (see the IT state
   handling in this function); in that case, a breakpoint may be
   inserted in addition to the returned PC, which will be used to set
   another breakpoint by our caller.  */

static CORE_ADDR
thumb_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct address_space *aspace = get_frame_address_space (frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned long pc_val = ((unsigned long) pc) + 4;	/* PC after prefetch */
  unsigned short inst1;
  CORE_ADDR nextpc = pc + 2;		/* Default is next instruction.  */
  unsigned long offset;
  ULONGEST status, itstate;

  nextpc = MAKE_THUMB_ADDR (nextpc);
  pc_val = MAKE_THUMB_ADDR (pc_val);

  inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);

  /* Thumb-2 conditional execution support.  There are eight bits in
     the CPSR which describe conditional execution state.  Once
     reconstructed (they're in a funny order), the low five bits
     describe the low bit of the condition for each instruction and
     how many instructions remain.  The high three bits describe the
     base condition.  One of the low four bits will be set if an IT
     block is active.  These bits read as zero on earlier
     processors.  */
  status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
  itstate = ((status >> 8) & 0xfc) | ((status >> 25) & 0x3);

  /* If-Then handling.  On GNU/Linux, where this routine is used, we
     use an undefined instruction as a breakpoint.  Unlike BKPT, IT
     can disable execution of the undefined instruction.  So we might
     miss the breakpoint if we set it on a skipped conditional
     instruction.  Because conditional instructions can change the
     flags, affecting the execution of further instructions, we may
     need to set two breakpoints.  */

  if (gdbarch_tdep (gdbarch)->thumb2_breakpoint != NULL)
    {
      if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
	{
	  /* An IT instruction.  Because this instruction does not
	     modify the flags, we can accurately predict the next
	     executed instruction.  */
	  itstate = inst1 & 0x00ff;
	  pc += thumb_insn_size (inst1);

	  /* Skip forward past all the instructions the IT state says
	     will not execute.  */
	  while (itstate != 0 && ! condition_true (itstate >> 4, status))
	    {
	      inst1 = read_memory_unsigned_integer (pc, 2,
						    byte_order_for_code);
	      pc += thumb_insn_size (inst1);
	      itstate = thumb_advance_itstate (itstate);
	    }

	  return MAKE_THUMB_ADDR (pc);
	}
      else if (itstate != 0)
	{
	  /* We are in a conditional block.  Check the condition.  */
	  if (! condition_true (itstate >> 4, status))
	    {
	      /* Advance to the next executed instruction.  */
	      pc += thumb_insn_size (inst1);
	      itstate = thumb_advance_itstate (itstate);

	      while (itstate != 0 && ! condition_true (itstate >> 4, status))
		{
		  inst1 = read_memory_unsigned_integer (pc, 2,
							byte_order_for_code);
		  pc += thumb_insn_size (inst1);
		  itstate = thumb_advance_itstate (itstate);
		}

	      return MAKE_THUMB_ADDR (pc);
	    }
	  else if ((itstate & 0x0f) == 0x08)
	    {
	      /* This is the last instruction of the conditional
		 block, and it is executed.  We can handle it normally
		 because the following instruction is not conditional,
		 and we must handle it normally because it is
		 permitted to branch.  Fall through.  */
	    }
	  else
	    {
	      int cond_negated;

	      /* There are conditional instructions after this one.
		 If this instruction modifies the flags, then we can
		 not predict what the next executed instruction will
		 be.  Fortunately, this instruction is architecturally
		 forbidden to branch; we know it will fall through.
		 Start by skipping past it.  */
	      pc += thumb_insn_size (inst1);
	      itstate = thumb_advance_itstate (itstate);

	      /* Set a breakpoint on the following instruction.  */
	      gdb_assert ((itstate & 0x0f) != 0);
	      arm_insert_single_step_breakpoint (gdbarch, aspace,
						 MAKE_THUMB_ADDR (pc));
	      cond_negated = (itstate >> 4) & 1;

	      /* Skip all following instructions with the same
		 condition.  If there is a later instruction in the IT
		 block with the opposite condition, set the other
		 breakpoint there.  If not, then set a breakpoint on
		 the instruction after the IT block.  */
	      do
		{
		  inst1 = read_memory_unsigned_integer (pc, 2,
							byte_order_for_code);
		  pc += thumb_insn_size (inst1);
		  itstate = thumb_advance_itstate (itstate);
		}
	      while (itstate != 0 && ((itstate >> 4) & 1) == cond_negated);

	      return MAKE_THUMB_ADDR (pc);
	    }
	}
    }
  else if (itstate & 0x0f)
    {
      /* We are in a conditional block.  Check the condition.  */
      int cond = itstate >> 4;

      if (! condition_true (cond, status))
	/* Advance to the next instruction.  All the 32-bit
	   instructions share a common prefix.  */
	return MAKE_THUMB_ADDR (pc + thumb_insn_size (inst1));

      /* Otherwise, handle the instruction normally.  */
    }

  /* From here on, decode the instruction at PC to find where it
     transfers control, if anywhere.  */
  if ((inst1 & 0xff00) == 0xbd00)	/* pop {rlist, pc} */
    {
      CORE_ADDR sp;

      /* Fetch the saved PC from the stack.  It's stored above
	 all of the other registers.  */
      offset = bitcount (bits (inst1, 0, 7)) * INT_REGISTER_SIZE;
      sp = get_frame_register_unsigned (frame, ARM_SP_REGNUM);
      nextpc = read_memory_unsigned_integer (sp + offset, 4, byte_order);
    }
  else if ((inst1 & 0xf000) == 0xd000)	/* conditional branch */
    {
      unsigned long cond = bits (inst1, 8, 11);
      if (cond == 0x0f)  /* 0x0f = SWI */
	{
	  struct gdbarch_tdep *tdep;
	  tdep = gdbarch_tdep (gdbarch);

	  /* Let the OS-specific code predict where the syscall
	     returns, if it knows how.  */
	  if (tdep->syscall_next_pc != NULL)
	    nextpc = tdep->syscall_next_pc (frame);

	}
      else if (cond != 0x0f && condition_true (cond, status))
	nextpc = pc_val + (sbits (inst1, 0, 7) << 1);
    }
  else if ((inst1 & 0xf800) == 0xe000)	/* unconditional branch */
    {
      nextpc = pc_val + (sbits (inst1, 0, 10) << 1);
    }
  else if (thumb_insn_size (inst1) == 4) /* 32-bit instruction */
    {
      unsigned short inst2;
      inst2 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);

      /* Default to the next instruction.  */
      nextpc = pc + 4;
      nextpc = MAKE_THUMB_ADDR (nextpc);

      if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
	{
	  /* Branches and miscellaneous control instructions.  */

	  if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
	    {
	      /* B, BL, BLX.  */
	      int j1, j2, imm1, imm2;

	      imm1 = sbits (inst1, 0, 10);
	      imm2 = bits (inst2, 0, 10);
	      j1 = bit (inst2, 13);
	      j2 = bit (inst2, 11);

	      offset = ((imm1 << 12) + (imm2 << 1));
	      offset ^= ((!j2) << 22) | ((!j1) << 23);

	      nextpc = pc_val + offset;
	      /* For BLX make sure to clear the low bits.  */
	      if (bit (inst2, 12) == 0)
		nextpc = nextpc & 0xfffffffc;
	    }
	  else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
	    {
	      /* SUBS PC, LR, #imm8.  */
	      nextpc = get_frame_register_unsigned (frame, ARM_LR_REGNUM);
	      nextpc -= inst2 & 0x00ff;
	    }
	  else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
	    {
	      /* Conditional branch.  */
	      if (condition_true (bits (inst1, 6, 9), status))
		{
		  int sign, j1, j2, imm1, imm2;

		  sign = sbits (inst1, 10, 10);
		  imm1 = bits (inst1, 0, 5);
		  imm2 = bits (inst2, 0, 10);
		  j1 = bit (inst2, 13);
		  j2 = bit (inst2, 11);

		  offset = (sign << 20) + (j2 << 19) + (j1 << 18);
		  offset += (imm1 << 12) + (imm2 << 1);

		  nextpc = pc_val + offset;
		}
	    }
	}
      else if ((inst1 & 0xfe50) == 0xe810)
	{
	  /* Load multiple or RFE.  */
	  int rn, offset, load_pc = 1;

	  rn = bits (inst1, 0, 3);
	  if (bit (inst1, 7) && !bit (inst1, 8))
	    {
	      /* LDMIA or POP */
	      if (!bit (inst2, 15))
		load_pc = 0;
	      offset = bitcount (inst2) * 4 - 4;
	    }
	  else if (!bit (inst1, 7) && bit (inst1, 8))
	    {
	      /* LDMDB */
	      if (!bit (inst2, 15))
		load_pc = 0;
	      offset = -4;
	    }
	  else if (bit (inst1, 7) && bit (inst1, 8))
	    {
	      /* RFEIA */
	      offset = 0;
	    }
	  else if (!bit (inst1, 7) && !bit (inst1, 8))
	    {
	      /* RFEDB */
	      offset = -8;
	    }
	  else
	    load_pc = 0;

	  if (load_pc)
	    {
	      CORE_ADDR addr = get_frame_register_unsigned (frame, rn);
	      nextpc = get_frame_memory_unsigned (frame, addr + offset, 4);
	    }
	}
      else if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
	{
	  /* MOV PC or MOVS PC.  */
	  nextpc = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
	  nextpc = MAKE_THUMB_ADDR (nextpc);
	}
      else if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
	{
	  /* LDR PC.  */
	  CORE_ADDR base;
	  int rn, load_pc = 1;

	  rn = bits (inst1, 0, 3);
	  base = get_frame_register_unsigned (frame, rn);
	  if (rn == ARM_PC_REGNUM)
	    {
	      /* PC-relative: the base is the aligned PC.  */
	      base = (base + 4) & ~(CORE_ADDR) 0x3;
	      if (bit (inst1, 7))
		base += bits (inst2, 0, 11);
	      else
		base -= bits (inst2, 0, 11);
	    }
	  else if (bit (inst1, 7))
	    base += bits (inst2, 0, 11);
	  else if (bit (inst2, 11))
	    {
	      if (bit (inst2, 10))
		{
		  if (bit (inst2, 9))
		    base += bits (inst2, 0, 7);
		  else
		    base -= bits (inst2, 0, 7);
		}
	    }
	  else if ((inst2 & 0x0fc0) == 0x0000)
	    {
	      /* Register offset with optional left shift.  */
	      int shift = bits (inst2, 4, 5), rm = bits (inst2, 0, 3);
	      base += get_frame_register_unsigned (frame, rm) << shift;
	    }
	  else
	    /* Reserved.  */
	    load_pc = 0;

	  if (load_pc)
	    nextpc = get_frame_memory_unsigned (frame, base, 4);
	}
      else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
	{
	  /* TBB.  */
	  CORE_ADDR tbl_reg, table, offset, length;

	  tbl_reg = bits (inst1, 0, 3);
	  if (tbl_reg == 0x0f)
	    table = pc + 4;  /* Regcache copy of PC isn't right yet.  */
	  else
	    table = get_frame_register_unsigned (frame, tbl_reg);

	  offset = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
	  length = 2 * get_frame_memory_unsigned (frame, table + offset, 1);
	  nextpc = pc_val + length;
	}
      else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
	{
	  /* TBH.  */
	  CORE_ADDR tbl_reg, table, offset, length;

	  tbl_reg = bits (inst1, 0, 3);
	  if (tbl_reg == 0x0f)
	    table = pc + 4;  /* Regcache copy of PC isn't right yet.  */
	  else
	    table = get_frame_register_unsigned (frame, tbl_reg);

	  offset = 2 * get_frame_register_unsigned (frame, bits (inst2, 0, 3));
	  length = 2 * get_frame_memory_unsigned (frame, table + offset, 2);
	  nextpc = pc_val + length;
	}
    }
  else if ((inst1 & 0xff00) == 0x4700)	/* bx REG, blx REG */
    {
      if (bits (inst1, 3, 6) == 0x0f)
	nextpc = UNMAKE_THUMB_ADDR (pc_val);
      else
	nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
    }
  else if ((inst1 & 0xff87) == 0x4687)	/* mov pc, REG */
    {
      if (bits (inst1, 3, 6) == 0x0f)
	nextpc = pc_val;
      else
	nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));

      nextpc = MAKE_THUMB_ADDR (nextpc);
    }
  else if ((inst1 & 0xf500) == 0xb100)
    {
      /* CBNZ or CBZ.  */
      int imm = (bit (inst1, 9) << 6) + (bits (inst1, 3, 7) << 1);
      ULONGEST reg = get_frame_register_unsigned (frame, bits (inst1, 0, 2));

      if (bit (inst1, 11) && reg != 0)
	nextpc = pc_val + imm;	/* CBNZ taken.  */
      else if (!bit (inst1, 11) && reg == 0)
	nextpc = pc_val + imm;	/* CBZ taken.  */
    }

  return nextpc;
}
4761
/* Get the raw next address.  PC is the current program counter, in
   FRAME, which is assumed to be executing in ARM mode.

   The value returned has the execution state of the next instruction
   encoded in it.  Use IS_THUMB_ADDR () to see whether the instruction is
   in Thumb-State, and gdbarch_addr_bits_remove () to get the plain memory
   address.  */

static CORE_ADDR
arm_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned long pc_val;
  unsigned long this_instr;
  unsigned long status;
  CORE_ADDR nextpc;

  pc_val = (unsigned long) pc;
  this_instr = read_memory_unsigned_integer (pc, 4, byte_order_for_code);

  status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
  nextpc = (CORE_ADDR) (pc_val + 4);	/* Default case */

  /* The NV condition space (cond == 0xf) holds unconditional
     instructions; only the ones that can affect the PC matter.  */
  if (bits (this_instr, 28, 31) == INST_NV)
    switch (bits (this_instr, 24, 27))
      {
      case 0xa:
      case 0xb:
	{
	  /* Branch with Link and change to Thumb.  */
	  nextpc = BranchDest (pc, this_instr);
	  nextpc |= bit (this_instr, 24) << 1;
	  nextpc = MAKE_THUMB_ADDR (nextpc);
	  break;
	}
      case 0xc:
      case 0xd:
      case 0xe:
	/* Coprocessor register transfer.  */
	if (bits (this_instr, 12, 15) == 15)
	  error (_("Invalid update to pc in instruction"));
	break;
      }
  else if (condition_true (bits (this_instr, 28, 31), status))
    {
      switch (bits (this_instr, 24, 27))
	{
	case 0x0:
	case 0x1:			/* data processing */
	case 0x2:
	case 0x3:
	  {
	    unsigned long operand1, operand2, result = 0;
	    unsigned long rn;
	    int c;

	    /* Only instructions whose destination is the PC can
	       change control flow.  */
	    if (bits (this_instr, 12, 15) != 15)
	      break;

	    if (bits (this_instr, 22, 25) == 0
		&& bits (this_instr, 4, 7) == 9)	/* multiply */
	      error (_("Invalid update to pc in instruction"));

	    /* BX <reg>, BLX <reg> */
	    if (bits (this_instr, 4, 27) == 0x12fff1
		|| bits (this_instr, 4, 27) == 0x12fff3)
	      {
		rn = bits (this_instr, 0, 3);
		nextpc = ((rn == ARM_PC_REGNUM)
			  ? (pc_val + 8)
			  : get_frame_register_unsigned (frame, rn));

		return nextpc;
	      }

	    /* Multiply into PC.  */
	    c = (status & FLAG_C) ? 1 : 0;
	    rn = bits (this_instr, 16, 19);
	    /* Reading the PC as an operand yields PC + 8.  */
	    operand1 = ((rn == ARM_PC_REGNUM)
			? (pc_val + 8)
			: get_frame_register_unsigned (frame, rn));

	    if (bit (this_instr, 25))
	      {
		/* Rotated 8-bit immediate operand.  */
		unsigned long immval = bits (this_instr, 0, 7);
		unsigned long rotate = 2 * bits (this_instr, 8, 11);
		operand2 = ((immval >> rotate) | (immval << (32 - rotate)))
		  & 0xffffffff;
	      }
	    else		/* operand 2 is a shifted register.  */
	      operand2 = shifted_reg_val (frame, this_instr, c,
					  pc_val, status);

	    /* Emulate the data-processing opcode to find the value
	       written to the PC.  */
	    switch (bits (this_instr, 21, 24))
	      {
	      case 0x0:	/*and */
		result = operand1 & operand2;
		break;

	      case 0x1:	/*eor */
		result = operand1 ^ operand2;
		break;

	      case 0x2:	/*sub */
		result = operand1 - operand2;
		break;

	      case 0x3:	/*rsb */
		result = operand2 - operand1;
		break;

	      case 0x4:	/*add */
		result = operand1 + operand2;
		break;

	      case 0x5:	/*adc */
		result = operand1 + operand2 + c;
		break;

	      case 0x6:	/*sbc */
		result = operand1 - operand2 + c;
		break;

	      case 0x7:	/*rsc */
		result = operand2 - operand1 + c;
		break;

	      case 0x8:
	      case 0x9:
	      case 0xa:
	      case 0xb:	/* tst, teq, cmp, cmn */
		/* Comparison opcodes do not write their result; the
		   PC is unchanged.  */
		result = (unsigned long) nextpc;
		break;

	      case 0xc:	/*orr */
		result = operand1 | operand2;
		break;

	      case 0xd:	/*mov */
		/* Always step into a function.  */
		result = operand2;
		break;

	      case 0xe:	/*bic */
		result = operand1 & ~operand2;
		break;

	      case 0xf:	/*mvn */
		result = ~operand2;
		break;
	      }

	    /* In 26-bit APCS the bottom two bits of the result are
	       ignored, and we always end up in ARM state.  */
	    if (!arm_apcs_32)
	      nextpc = arm_addr_bits_remove (gdbarch, result);
	    else
	      nextpc = result;

	    break;
	  }

	case 0x4:
	case 0x5:		/* data transfer */
	case 0x6:
	case 0x7:
	  if (bit (this_instr, 20))
	    {
	      /* load */
	      if (bits (this_instr, 12, 15) == 15)
		{
		  /* rd == pc */
		  unsigned long rn;
		  unsigned long base;

		  if (bit (this_instr, 22))
		    error (_("Invalid update to pc in instruction"));

		  /* byte write to PC */
		  rn = bits (this_instr, 16, 19);
		  base = ((rn == ARM_PC_REGNUM)
			  ? (pc_val + 8)
			  : get_frame_register_unsigned (frame, rn));

		  if (bit (this_instr, 24))
		    {
		      /* pre-indexed */
		      int c = (status & FLAG_C) ? 1 : 0;
		      unsigned long offset =
			(bit (this_instr, 25)
			 ? shifted_reg_val (frame, this_instr, c, pc_val, status)
			 : bits (this_instr, 0, 11));

		      if (bit (this_instr, 23))
			base += offset;
		      else
			base -= offset;
		    }
		  nextpc =
		    (CORE_ADDR) read_memory_unsigned_integer ((CORE_ADDR) base,
							      4, byte_order);
		}
	    }
	  break;

	case 0x8:
	case 0x9:		/* block transfer */
	  if (bit (this_instr, 20))
	    {
	      /* LDM */
	      if (bit (this_instr, 15))
		{
		  /* loading pc */
		  int offset = 0;
		  unsigned long rn_val
		    = get_frame_register_unsigned (frame,
						   bits (this_instr, 16, 19));

		  if (bit (this_instr, 23))
		    {
		      /* up */
		      unsigned long reglist = bits (this_instr, 0, 14);
		      offset = bitcount (reglist) * 4;
		      if (bit (this_instr, 24))		/* pre */
			offset += 4;
		    }
		  else if (bit (this_instr, 24))
		    offset = -4;

		  nextpc =
		    (CORE_ADDR) read_memory_unsigned_integer ((CORE_ADDR)
							      (rn_val + offset),
							      4, byte_order);
		}
	    }
	  break;

	case 0xb:		/* branch & link */
	case 0xa:		/* branch */
	  {
	    nextpc = BranchDest (pc, this_instr);
	    break;
	  }

	case 0xc:
	case 0xd:
	case 0xe:		/* coproc ops */
	  break;
	case 0xf:		/* SWI */
	  {
	    struct gdbarch_tdep *tdep;
	    tdep = gdbarch_tdep (gdbarch);

	    /* Let the OS-specific code predict the syscall's return
	       address, if it knows how.  */
	    if (tdep->syscall_next_pc != NULL)
	      nextpc = tdep->syscall_next_pc (frame);

	  }
	  break;

	default:
	  fprintf_filtered (gdb_stderr, _("Bad bit-field extraction\n"));
	  return (pc);
	}
    }

  return nextpc;
}
5031
5032 /* Determine next PC after current instruction executes. Will call either
5033 arm_get_next_pc_raw or thumb_get_next_pc_raw. Error out if infinite
5034 loop is detected. */
5035
5036 CORE_ADDR
5037 arm_get_next_pc (struct frame_info *frame, CORE_ADDR pc)
5038 {
5039 CORE_ADDR nextpc;
5040
5041 if (arm_frame_is_thumb (frame))
5042 nextpc = thumb_get_next_pc_raw (frame, pc);
5043 else
5044 nextpc = arm_get_next_pc_raw (frame, pc);
5045
5046 return nextpc;
5047 }
5048
/* Like insert_single_step_breakpoint, but make sure we use a breakpoint
   of the appropriate mode (as encoded in the PC value), even if this
   differs from what would be expected according to the symbol tables.  */

void
arm_insert_single_step_breakpoint (struct gdbarch *gdbarch,
				   struct address_space *aspace,
				   CORE_ADDR pc)
{
  /* Temporarily set the global mode override from the Thumb bit of
     PC; the cleanup restores the previous value afterwards.
     (Presumably arm_override_mode is consulted when choosing the
     breakpoint kind — see its other uses in this file.)  */
  struct cleanup *old_chain
    = make_cleanup_restore_integer (&arm_override_mode);

  arm_override_mode = IS_THUMB_ADDR (pc);
  /* Strip the Thumb bit to get the plain memory address.  */
  pc = gdbarch_addr_bits_remove (gdbarch, pc);

  insert_single_step_breakpoint (gdbarch, aspace, pc);

  do_cleanups (old_chain);
}
5068
/* Checks for an atomic sequence of instructions beginning with a LDREX{,B,H,D}
   instruction and ending with a STREX{,B,H,D} instruction.  If such a sequence
   is found, attempt to step through it.  A breakpoint is placed at the end of
   the sequence.  Returns non-zero if breakpoints were placed, zero to
   fall back to normal single-stepping.  */

static int
thumb_deal_with_atomic_sequence_raw (struct frame_info *frame)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct address_space *aspace = get_frame_address_space (frame);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  CORE_ADDR pc = get_frame_pc (frame);
  CORE_ADDR breaks[2] = {-1, -1};
  CORE_ADDR loc = pc;
  unsigned short insn1, insn2;
  int insn_count;
  int index;
  int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed).  */
  const int atomic_sequence_length = 16; /* Instruction sequence length.  */
  ULONGEST status, itstate;

  /* We currently do not support atomic sequences within an IT block.  */
  status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
  itstate = ((status >> 8) & 0xfc) | ((status >> 25) & 0x3);
  if (itstate & 0x0f)
    return 0;

  /* Assume all atomic sequences start with a ldrex{,b,h,d} instruction.  */
  insn1 = read_memory_unsigned_integer (loc, 2, byte_order_for_code);
  loc += 2;
  if (thumb_insn_size (insn1) != 4)
    return 0;

  insn2 = read_memory_unsigned_integer (loc, 2, byte_order_for_code);
  loc += 2;
  if (!((insn1 & 0xfff0) == 0xe850
	|| ((insn1 & 0xfff0) == 0xe8d0 && (insn2 & 0x00c0) == 0x0040)))
    return 0;

  /* Assume that no atomic sequence is longer than "atomic_sequence_length"
     instructions.  */
  for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
    {
      insn1 = read_memory_unsigned_integer (loc, 2, byte_order_for_code);
      loc += 2;

      if (thumb_insn_size (insn1) != 4)
	{
	  /* 16-bit instruction.  */

	  /* Assume that there is at most one conditional branch in the
	     atomic sequence.  If a conditional branch is found, put a
	     breakpoint in its destination address.  */
	  if ((insn1 & 0xf000) == 0xd000 && bits (insn1, 8, 11) != 0x0f)
	    {
	      if (last_breakpoint > 0)
		return 0; /* More than one conditional branch found,
			     fallback to the standard code.  */

	      breaks[1] = loc + 2 + (sbits (insn1, 0, 7) << 1);
	      last_breakpoint++;
	    }

	  /* We do not support atomic sequences that use any *other*
	     instructions but conditional branches to change the PC.
	     Fall back to standard code to avoid losing control of
	     execution.  */
	  else if (thumb_instruction_changes_pc (insn1))
	    return 0;
	}
      else
	{
	  /* 32-bit instruction: fetch the second halfword.  */
	  insn2 = read_memory_unsigned_integer (loc, 2, byte_order_for_code);
	  loc += 2;

	  /* Assume that there is at most one conditional branch in the
	     atomic sequence.  If a conditional branch is found, put a
	     breakpoint in its destination address.  */
	  if ((insn1 & 0xf800) == 0xf000
	      && (insn2 & 0xd000) == 0x8000
	      && (insn1 & 0x0380) != 0x0380)
	    {
	      int sign, j1, j2, imm1, imm2;
	      unsigned int offset;

	      sign = sbits (insn1, 10, 10);
	      imm1 = bits (insn1, 0, 5);
	      imm2 = bits (insn2, 0, 10);
	      j1 = bit (insn2, 13);
	      j2 = bit (insn2, 11);

	      offset = (sign << 20) + (j2 << 19) + (j1 << 18);
	      offset += (imm1 << 12) + (imm2 << 1);

	      if (last_breakpoint > 0)
		return 0; /* More than one conditional branch found,
			     fallback to the standard code.  */

	      breaks[1] = loc + offset;
	      last_breakpoint++;
	    }

	  /* We do not support atomic sequences that use any *other*
	     instructions but conditional branches to change the PC.
	     Fall back to standard code to avoid losing control of
	     execution.  */
	  else if (thumb2_instruction_changes_pc (insn1, insn2))
	    return 0;

	  /* If we find a strex{,b,h,d}, we're done.  */
	  if ((insn1 & 0xfff0) == 0xe840
	      || ((insn1 & 0xfff0) == 0xe8c0 && (insn2 & 0x00c0) == 0x0040))
	    break;
	}
    }

  /* If we didn't find the strex{,b,h,d}, we cannot handle the sequence.  */
  if (insn_count == atomic_sequence_length)
    return 0;

  /* Insert a breakpoint right after the end of the atomic sequence.  */
  breaks[0] = loc;

  /* Check for duplicated breakpoints.  Check also for a breakpoint
     placed (branch instruction's destination) anywhere in sequence.  */
  if (last_breakpoint
      && (breaks[1] == breaks[0]
	  || (breaks[1] >= pc && breaks[1] < loc)))
    last_breakpoint = 0;

  /* Effectively inserts the breakpoints.  */
  for (index = 0; index <= last_breakpoint; index++)
    arm_insert_single_step_breakpoint (gdbarch, aspace,
				       MAKE_THUMB_ADDR (breaks[index]));

  return 1;
}
5204
/* Software single-step helper for ARM-state code: recognize a
   ldrex{,b,h,d} .. strex{,b,h,d} atomic sequence starting at the
   current PC and place breakpoints so the whole sequence is stepped
   over at once (stopping inside it would clear the exclusive monitor
   and make the sequence retry forever).  Returns 1 if breakpoints
   were placed, 0 to fall back to ordinary single-stepping.  */

static int
arm_deal_with_atomic_sequence_raw (struct frame_info *frame)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct address_space *aspace = get_frame_address_space (frame);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  CORE_ADDR pc = get_frame_pc (frame);
  CORE_ADDR breaks[2] = {-1, -1};
  CORE_ADDR loc = pc;
  unsigned int insn;
  int insn_count;
  int index;
  int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed).  */
  const int atomic_sequence_length = 16; /* Instruction sequence length.  */

  /* Assume all atomic sequences start with a ldrex{,b,h,d} instruction.
     Note that we do not currently support conditionally executed atomic
     instructions.  */
  insn = read_memory_unsigned_integer (loc, 4, byte_order_for_code);
  loc += 4;
  /* Mask/value match the ARM-state LDREX{,B,H,D} encodings.  */
  if ((insn & 0xff9000f0) != 0xe1900090)
    return 0;

  /* Assume that no atomic sequence is longer than "atomic_sequence_length"
     instructions.  */
  for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
    {
      insn = read_memory_unsigned_integer (loc, 4, byte_order_for_code);
      loc += 4;

      /* Assume that there is at most one conditional branch in the atomic
	 sequence.  If a conditional branch is found, put a breakpoint in
	 its destination address.  */
      if (bits (insn, 24, 27) == 0xa)
	{
	  if (last_breakpoint > 0)
	    return 0; /* More than one conditional branch found, fallback
			 to the standard single-step code.  */

	  breaks[1] = BranchDest (loc - 4, insn);
	  last_breakpoint++;
	}

      /* We do not support atomic sequences that use any *other* instructions
	 but conditional branches to change the PC.  Fall back to standard
	 code to avoid losing control of execution.  */
      else if (arm_instruction_changes_pc (insn))
	return 0;

      /* If we find a strex{,b,h,d}, we're done.  */
      if ((insn & 0xff9000f0) == 0xe1800090)
	break;
    }

  /* If we didn't find the strex{,b,h,d}, we cannot handle the sequence.  */
  if (insn_count == atomic_sequence_length)
    return 0;

  /* Insert a breakpoint right after the end of the atomic sequence.  */
  breaks[0] = loc;

  /* Check for duplicated breakpoints.  Check also for a breakpoint
     placed (branch instruction's destination) anywhere in sequence.  */
  if (last_breakpoint
      && (breaks[1] == breaks[0]
	  || (breaks[1] >= pc && breaks[1] < loc)))
    last_breakpoint = 0;

  /* Effectively inserts the breakpoints.  */
  for (index = 0; index <= last_breakpoint; index++)
    arm_insert_single_step_breakpoint (gdbarch, aspace, breaks[index]);

  return 1;
}
5279
/* Detect and step over a ldrex/strex atomic sequence at FRAME's PC,
   dispatching on the frame's execution state (Thumb vs ARM).  Returns
   nonzero if breakpoints covering the sequence were placed.  */

int
arm_deal_with_atomic_sequence (struct frame_info *frame)
{
  int is_thumb = arm_frame_is_thumb (frame);

  return (is_thumb
	  ? thumb_deal_with_atomic_sequence_raw (frame)
	  : arm_deal_with_atomic_sequence_raw (frame));
}
5288
5289 /* single_step() is called just before we want to resume the inferior,
5290 if we want to single-step it but there is no hardware or kernel
5291 single-step support. We find the target of the coming instruction
5292 and breakpoint it. */
5293
5294 int
5295 arm_software_single_step (struct frame_info *frame)
5296 {
5297 struct gdbarch *gdbarch = get_frame_arch (frame);
5298 struct address_space *aspace = get_frame_address_space (frame);
5299 CORE_ADDR next_pc;
5300
5301 if (arm_deal_with_atomic_sequence (frame))
5302 return 1;
5303
5304 next_pc = arm_get_next_pc (frame, get_frame_pc (frame));
5305 arm_insert_single_step_breakpoint (gdbarch, aspace, next_pc);
5306
5307 return 1;
5308 }
5309
5310 /* Given BUF, which is OLD_LEN bytes ending at ENDADDR, expand
5311 the buffer to be NEW_LEN bytes ending at ENDADDR. Return
5312 NULL if an error occurs. BUF is freed. */
5313
5314 static gdb_byte *
5315 extend_buffer_earlier (gdb_byte *buf, CORE_ADDR endaddr,
5316 int old_len, int new_len)
5317 {
5318 gdb_byte *new_buf;
5319 int bytes_to_read = new_len - old_len;
5320
5321 new_buf = xmalloc (new_len);
5322 memcpy (new_buf + bytes_to_read, buf, old_len);
5323 xfree (buf);
5324 if (target_read_memory (endaddr - new_len, new_buf, bytes_to_read) != 0)
5325 {
5326 xfree (new_buf);
5327 return NULL;
5328 }
5329 return new_buf;
5330 }
5331
5332 /* An IT block is at most the 2-byte IT instruction followed by
5333 four 4-byte instructions. The furthest back we must search to
5334 find an IT block that affects the current instruction is thus
5335 2 + 3 * 4 == 14 bytes. */
5336 #define MAX_IT_BLOCK_PREFIX 14
5337
5338 /* Use a quick scan if there are more than this many bytes of
5339 code. */
5340 #define IT_SCAN_THRESHOLD 32
5341
5342 /* Adjust a breakpoint's address to move breakpoints out of IT blocks.
5343 A breakpoint in an IT block may not be hit, depending on the
5344 condition flags. */
5345 static CORE_ADDR
5346 arm_adjust_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR bpaddr)
5347 {
5348 gdb_byte *buf;
5349 char map_type;
5350 CORE_ADDR boundary, func_start;
5351 int buf_len;
5352 enum bfd_endian order = gdbarch_byte_order_for_code (gdbarch);
5353 int i, any, last_it, last_it_count;
5354
5355 /* If we are using BKPT breakpoints, none of this is necessary. */
5356 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint == NULL)
5357 return bpaddr;
5358
5359 /* ARM mode does not have this problem. */
5360 if (!arm_pc_is_thumb (gdbarch, bpaddr))
5361 return bpaddr;
5362
5363 /* We are setting a breakpoint in Thumb code that could potentially
5364 contain an IT block. The first step is to find how much Thumb
5365 code there is; we do not need to read outside of known Thumb
5366 sequences. */
5367 map_type = arm_find_mapping_symbol (bpaddr, &boundary);
5368 if (map_type == 0)
5369 /* Thumb-2 code must have mapping symbols to have a chance. */
5370 return bpaddr;
5371
5372 bpaddr = gdbarch_addr_bits_remove (gdbarch, bpaddr);
5373
5374 if (find_pc_partial_function (bpaddr, NULL, &func_start, NULL)
5375 && func_start > boundary)
5376 boundary = func_start;
5377
5378 /* Search for a candidate IT instruction. We have to do some fancy
5379 footwork to distinguish a real IT instruction from the second
5380 half of a 32-bit instruction, but there is no need for that if
5381 there's no candidate. */
5382 buf_len = min (bpaddr - boundary, MAX_IT_BLOCK_PREFIX);
5383 if (buf_len == 0)
5384 /* No room for an IT instruction. */
5385 return bpaddr;
5386
5387 buf = xmalloc (buf_len);
5388 if (target_read_memory (bpaddr - buf_len, buf, buf_len) != 0)
5389 return bpaddr;
5390 any = 0;
5391 for (i = 0; i < buf_len; i += 2)
5392 {
5393 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
5394 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
5395 {
5396 any = 1;
5397 break;
5398 }
5399 }
5400 if (any == 0)
5401 {
5402 xfree (buf);
5403 return bpaddr;
5404 }
5405
5406 /* OK, the code bytes before this instruction contain at least one
5407 halfword which resembles an IT instruction. We know that it's
5408 Thumb code, but there are still two possibilities. Either the
5409 halfword really is an IT instruction, or it is the second half of
5410 a 32-bit Thumb instruction. The only way we can tell is to
5411 scan forwards from a known instruction boundary. */
5412 if (bpaddr - boundary > IT_SCAN_THRESHOLD)
5413 {
5414 int definite;
5415
5416 /* There's a lot of code before this instruction. Start with an
5417 optimistic search; it's easy to recognize halfwords that can
5418 not be the start of a 32-bit instruction, and use that to
5419 lock on to the instruction boundaries. */
5420 buf = extend_buffer_earlier (buf, bpaddr, buf_len, IT_SCAN_THRESHOLD);
5421 if (buf == NULL)
5422 return bpaddr;
5423 buf_len = IT_SCAN_THRESHOLD;
5424
5425 definite = 0;
5426 for (i = 0; i < buf_len - sizeof (buf) && ! definite; i += 2)
5427 {
5428 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
5429 if (thumb_insn_size (inst1) == 2)
5430 {
5431 definite = 1;
5432 break;
5433 }
5434 }
5435
5436 /* At this point, if DEFINITE, BUF[I] is the first place we
5437 are sure that we know the instruction boundaries, and it is far
5438 enough from BPADDR that we could not miss an IT instruction
5439 affecting BPADDR. If ! DEFINITE, give up - start from a
5440 known boundary. */
5441 if (! definite)
5442 {
5443 buf = extend_buffer_earlier (buf, bpaddr, buf_len,
5444 bpaddr - boundary);
5445 if (buf == NULL)
5446 return bpaddr;
5447 buf_len = bpaddr - boundary;
5448 i = 0;
5449 }
5450 }
5451 else
5452 {
5453 buf = extend_buffer_earlier (buf, bpaddr, buf_len, bpaddr - boundary);
5454 if (buf == NULL)
5455 return bpaddr;
5456 buf_len = bpaddr - boundary;
5457 i = 0;
5458 }
5459
5460 /* Scan forwards. Find the last IT instruction before BPADDR. */
5461 last_it = -1;
5462 last_it_count = 0;
5463 while (i < buf_len)
5464 {
5465 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
5466 last_it_count--;
5467 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
5468 {
5469 last_it = i;
5470 if (inst1 & 0x0001)
5471 last_it_count = 4;
5472 else if (inst1 & 0x0002)
5473 last_it_count = 3;
5474 else if (inst1 & 0x0004)
5475 last_it_count = 2;
5476 else
5477 last_it_count = 1;
5478 }
5479 i += thumb_insn_size (inst1);
5480 }
5481
5482 xfree (buf);
5483
5484 if (last_it == -1)
5485 /* There wasn't really an IT instruction after all. */
5486 return bpaddr;
5487
5488 if (last_it_count < 1)
5489 /* It was too far away. */
5490 return bpaddr;
5491
5492 /* This really is a trouble spot. Move the breakpoint to the IT
5493 instruction. */
5494 return bpaddr - buf_len + last_it;
5495 }
5496
5497 /* ARM displaced stepping support.
5498
5499 Generally ARM displaced stepping works as follows:
5500
5501 1. When an instruction is to be single-stepped, it is first decoded by
5502 arm_process_displaced_insn (called from arm_displaced_step_copy_insn).
5503 Depending on the type of instruction, it is then copied to a scratch
5504 location, possibly in a modified form. The copy_* set of functions
5505 performs such modification, as necessary. A breakpoint is placed after
5506 the modified instruction in the scratch space to return control to GDB.
5507 Note in particular that instructions which modify the PC will no longer
5508 do so after modification.
5509
5510 2. The instruction is single-stepped, by setting the PC to the scratch
5511 location address, and resuming. Control returns to GDB when the
5512 breakpoint is hit.
5513
5514 3. A cleanup function (cleanup_*) is called corresponding to the copy_*
5515 function used for the current instruction. This function's job is to
5516 put the CPU/memory state back to what it would have been if the
5517 instruction had been executed unmodified in its original location. */
5518
5519 /* NOP instruction (mov r0, r0). */
5520 #define ARM_NOP 0xe1a00000
5521 #define THUMB_NOP 0x4600
5522
5523 /* Helper for register reads for displaced stepping. In particular, this
5524 returns the PC as it would be seen by the instruction at its original
5525 location. */
5526
5527 ULONGEST
5528 displaced_read_reg (struct regcache *regs, struct displaced_step_closure *dsc,
5529 int regno)
5530 {
5531 ULONGEST ret;
5532 CORE_ADDR from = dsc->insn_addr;
5533
5534 if (regno == ARM_PC_REGNUM)
5535 {
5536 /* Compute pipeline offset:
5537 - When executing an ARM instruction, PC reads as the address of the
5538 current instruction plus 8.
5539 - When executing a Thumb instruction, PC reads as the address of the
5540 current instruction plus 4. */
5541
5542 if (!dsc->is_thumb)
5543 from += 8;
5544 else
5545 from += 4;
5546
5547 if (debug_displaced)
5548 fprintf_unfiltered (gdb_stdlog, "displaced: read pc value %.8lx\n",
5549 (unsigned long) from);
5550 return (ULONGEST) from;
5551 }
5552 else
5553 {
5554 regcache_cooked_read_unsigned (regs, regno, &ret);
5555 if (debug_displaced)
5556 fprintf_unfiltered (gdb_stdlog, "displaced: read r%d value %.8lx\n",
5557 regno, (unsigned long) ret);
5558 return ret;
5559 }
5560 }
5561
5562 static int
5563 displaced_in_arm_mode (struct regcache *regs)
5564 {
5565 ULONGEST ps;
5566 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
5567
5568 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
5569
5570 return (ps & t_bit) == 0;
5571 }
5572
5573 /* Write to the PC as from a branch instruction. */
5574
5575 static void
5576 branch_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5577 ULONGEST val)
5578 {
5579 if (!dsc->is_thumb)
5580 /* Note: If bits 0/1 are set, this branch would be unpredictable for
5581 architecture versions < 6. */
5582 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
5583 val & ~(ULONGEST) 0x3);
5584 else
5585 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
5586 val & ~(ULONGEST) 0x1);
5587 }
5588
5589 /* Write to the PC as from a branch-exchange instruction. */
5590
5591 static void
5592 bx_write_pc (struct regcache *regs, ULONGEST val)
5593 {
5594 ULONGEST ps;
5595 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
5596
5597 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
5598
5599 if ((val & 1) == 1)
5600 {
5601 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps | t_bit);
5602 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffe);
5603 }
5604 else if ((val & 2) == 0)
5605 {
5606 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
5607 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val);
5608 }
5609 else
5610 {
5611 /* Unpredictable behaviour. Try to do something sensible (switch to ARM
5612 mode, align dest to 4 bytes). */
5613 warning (_("Single-stepping BX to non-word-aligned ARM instruction."));
5614 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
5615 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffc);
5616 }
5617 }
5618
5619 /* Write to the PC as if from a load instruction. */
5620
5621 static void
5622 load_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5623 ULONGEST val)
5624 {
5625 if (DISPLACED_STEPPING_ARCH_VERSION >= 5)
5626 bx_write_pc (regs, val);
5627 else
5628 branch_write_pc (regs, dsc, val);
5629 }
5630
5631 /* Write to the PC as if from an ALU instruction. */
5632
5633 static void
5634 alu_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5635 ULONGEST val)
5636 {
5637 if (DISPLACED_STEPPING_ARCH_VERSION >= 7 && !dsc->is_thumb)
5638 bx_write_pc (regs, val);
5639 else
5640 branch_write_pc (regs, dsc, val);
5641 }
5642
/* Helper for writing to registers for displaced stepping.  Writing to the PC
   has a varying effects depending on the instruction which does the write:
   this is controlled by the WRITE_PC argument.  For a non-PC register the
   value is simply stored; for the PC, the appropriate *_write_pc helper is
   invoked and DSC records that the PC was written, so the fixup phase knows
   not to overwrite it afterwards.  */

void
displaced_write_reg (struct regcache *regs, struct displaced_step_closure *dsc,
		     int regno, ULONGEST val, enum pc_write_style write_pc)
{
  if (regno == ARM_PC_REGNUM)
    {
      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog, "displaced: writing pc %.8lx\n",
			    (unsigned long) val);
      /* Dispatch on the semantics of the original writing instruction:
	 plain branch, branch-exchange, load, or ALU write.  */
      switch (write_pc)
	{
	case BRANCH_WRITE_PC:
	  branch_write_pc (regs, dsc, val);
	  break;

	case BX_WRITE_PC:
	  bx_write_pc (regs, val);
	  break;

	case LOAD_WRITE_PC:
	  load_write_pc (regs, dsc, val);
	  break;

	case ALU_WRITE_PC:
	  alu_write_pc (regs, dsc, val);
	  break;

	case CANNOT_WRITE_PC:
	  warning (_("Instruction wrote to PC in an unexpected way when "
		     "single-stepping"));
	  break;

	default:
	  internal_error (__FILE__, __LINE__,
			  _("Invalid argument to displaced_write_reg"));
	}

      /* Record the PC write so the post-step fixup leaves it alone.  */
      dsc->wrote_to_pc = 1;
    }
  else
    {
      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog, "displaced: writing r%d value %.8lx\n",
			    regno, (unsigned long) val);
      regcache_cooked_write_unsigned (regs, regno, val);
    }
}
5694
/* This function is used to concisely determine if an instruction INSN
   references PC.  Register fields of interest in INSN should have the
   corresponding fields of BITMASK set to 0b1111.  The function
   returns 1 if any of these fields in INSN reference the PC
   (also 0b1111, r15), else it returns 0.  */

static int
insn_references_pc (uint32_t insn, uint32_t bitmask)
{
  while (bitmask != 0)
    {
      /* Isolate the lowest set bit of BITMASK; the register field of
	 interest is the four bits starting there.  */
      uint32_t lowbit = bitmask & (~bitmask + 1);
      uint32_t field_mask = lowbit * 0xf;

      /* A field of all ones denotes r15 (the PC).  */
      if ((insn & field_mask) == field_mask)
	return 1;

      /* Consume this field and move on to the next.  */
      bitmask &= ~field_mask;
    }

  return 0;
}
5726
5727 /* The simplest copy function. Many instructions have the same effect no
5728 matter what address they are executed at: in those cases, use this. */
5729
5730 static int
5731 arm_copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
5732 const char *iname, struct displaced_step_closure *dsc)
5733 {
5734 if (debug_displaced)
5735 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx, "
5736 "opcode/class '%s' unmodified\n", (unsigned long) insn,
5737 iname);
5738
5739 dsc->modinsn[0] = insn;
5740
5741 return 0;
5742 }
5743
5744 static int
5745 thumb_copy_unmodified_32bit (struct gdbarch *gdbarch, uint16_t insn1,
5746 uint16_t insn2, const char *iname,
5747 struct displaced_step_closure *dsc)
5748 {
5749 if (debug_displaced)
5750 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x %.4x, "
5751 "opcode/class '%s' unmodified\n", insn1, insn2,
5752 iname);
5753
5754 dsc->modinsn[0] = insn1;
5755 dsc->modinsn[1] = insn2;
5756 dsc->numinsns = 2;
5757
5758 return 0;
5759 }
5760
5761 /* Copy 16-bit Thumb(Thumb and 16-bit Thumb-2) instruction without any
5762 modification. */
5763 static int
5764 thumb_copy_unmodified_16bit (struct gdbarch *gdbarch, unsigned int insn,
5765 const char *iname,
5766 struct displaced_step_closure *dsc)
5767 {
5768 if (debug_displaced)
5769 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x, "
5770 "opcode/class '%s' unmodified\n", insn,
5771 iname);
5772
5773 dsc->modinsn[0] = insn;
5774
5775 return 0;
5776 }
5777
5778 /* Preload instructions with immediate offset. */
5779
5780 static void
5781 cleanup_preload (struct gdbarch *gdbarch,
5782 struct regcache *regs, struct displaced_step_closure *dsc)
5783 {
5784 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5785 if (!dsc->u.preload.immed)
5786 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5787 }
5788
5789 static void
5790 install_preload (struct gdbarch *gdbarch, struct regcache *regs,
5791 struct displaced_step_closure *dsc, unsigned int rn)
5792 {
5793 ULONGEST rn_val;
5794 /* Preload instructions:
5795
5796 {pli/pld} [rn, #+/-imm]
5797 ->
5798 {pli/pld} [r0, #+/-imm]. */
5799
5800 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5801 rn_val = displaced_read_reg (regs, dsc, rn);
5802 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5803 dsc->u.preload.immed = 1;
5804
5805 dsc->cleanup = &cleanup_preload;
5806 }
5807
5808 static int
5809 arm_copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5810 struct displaced_step_closure *dsc)
5811 {
5812 unsigned int rn = bits (insn, 16, 19);
5813
5814 if (!insn_references_pc (insn, 0x000f0000ul))
5815 return arm_copy_unmodified (gdbarch, insn, "preload", dsc);
5816
5817 if (debug_displaced)
5818 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
5819 (unsigned long) insn);
5820
5821 dsc->modinsn[0] = insn & 0xfff0ffff;
5822
5823 install_preload (gdbarch, regs, dsc, rn);
5824
5825 return 0;
5826 }
5827
/* Copy a Thumb-2 PLD/PLI (literal) for displaced stepping.  A
   PC-relative preload is rewritten into a register-offset form using
   r0 (the original PC value) and r1 (the signed immediate).  */
static int
thumb2_copy_preload (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
		     struct regcache *regs, struct displaced_step_closure *dsc)
{
  unsigned int rn = bits (insn1, 0, 3);
  unsigned int u_bit = bit (insn1, 7);	/* Add/subtract bit of the offset.  */
  int imm12 = bits (insn2, 0, 11);
  ULONGEST pc_val;

  /* Only PC-relative forms need modification.  */
  if (rn != ARM_PC_REGNUM)
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "preload", dsc);

  /* PC is only allowed to use in PLI (immediate,literal) Encoding T3, and
     PLD (literal) Encoding T1.  */
  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: copying pld/pli pc (0x%x) %c imm12 %.4x\n",
			(unsigned int) dsc->insn_addr, u_bit ? '+' : '-',
			imm12);

  /* Fold the U bit into the sign of the offset.  */
  if (!u_bit)
    imm12 = -1 * imm12;

  /* Rewrite instruction {pli/pld} PC imm12 into:
     Prepare: tmp[0] <- r0, tmp[1] <- r1, r0 <- pc, r1 <- imm12

     {pli/pld} [r0, r1]

     Cleanup: r0 <- tmp[0], r1 <- tmp[1].  */

  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);

  /* Reads as the original instruction's pipeline-adjusted PC.  */
  pc_val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);

  displaced_write_reg (regs, dsc, 0, pc_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 1, imm12, CANNOT_WRITE_PC);
  /* Register form: cleanup_preload must restore both r0 and r1.  */
  dsc->u.preload.immed = 0;

  /* {pli/pld} [r0, r1] */
  dsc->modinsn[0] = insn1 & 0xfff0;
  dsc->modinsn[1] = 0xf001;
  dsc->numinsns = 2;

  dsc->cleanup = &cleanup_preload;
  return 0;
}
5875
5876 /* Preload instructions with register offset. */
5877
5878 static void
5879 install_preload_reg(struct gdbarch *gdbarch, struct regcache *regs,
5880 struct displaced_step_closure *dsc, unsigned int rn,
5881 unsigned int rm)
5882 {
5883 ULONGEST rn_val, rm_val;
5884
5885 /* Preload register-offset instructions:
5886
5887 {pli/pld} [rn, rm {, shift}]
5888 ->
5889 {pli/pld} [r0, r1 {, shift}]. */
5890
5891 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5892 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
5893 rn_val = displaced_read_reg (regs, dsc, rn);
5894 rm_val = displaced_read_reg (regs, dsc, rm);
5895 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5896 displaced_write_reg (regs, dsc, 1, rm_val, CANNOT_WRITE_PC);
5897 dsc->u.preload.immed = 0;
5898
5899 dsc->cleanup = &cleanup_preload;
5900 }
5901
5902 static int
5903 arm_copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
5904 struct regcache *regs,
5905 struct displaced_step_closure *dsc)
5906 {
5907 unsigned int rn = bits (insn, 16, 19);
5908 unsigned int rm = bits (insn, 0, 3);
5909
5910
5911 if (!insn_references_pc (insn, 0x000f000ful))
5912 return arm_copy_unmodified (gdbarch, insn, "preload reg", dsc);
5913
5914 if (debug_displaced)
5915 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
5916 (unsigned long) insn);
5917
5918 dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;
5919
5920 install_preload_reg (gdbarch, regs, dsc, rn, rm);
5921 return 0;
5922 }
5923
5924 /* Copy/cleanup coprocessor load and store instructions. */
5925
5926 static void
5927 cleanup_copro_load_store (struct gdbarch *gdbarch,
5928 struct regcache *regs,
5929 struct displaced_step_closure *dsc)
5930 {
5931 ULONGEST rn_val = displaced_read_reg (regs, dsc, 0);
5932
5933 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5934
5935 if (dsc->u.ldst.writeback)
5936 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, LOAD_WRITE_PC);
5937 }
5938
5939 static void
5940 install_copro_load_store (struct gdbarch *gdbarch, struct regcache *regs,
5941 struct displaced_step_closure *dsc,
5942 int writeback, unsigned int rn)
5943 {
5944 ULONGEST rn_val;
5945
5946 /* Coprocessor load/store instructions:
5947
5948 {stc/stc2} [<Rn>, #+/-imm] (and other immediate addressing modes)
5949 ->
5950 {stc/stc2} [r0, #+/-imm].
5951
5952 ldc/ldc2 are handled identically. */
5953
5954 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5955 rn_val = displaced_read_reg (regs, dsc, rn);
5956 /* PC should be 4-byte aligned. */
5957 rn_val = rn_val & 0xfffffffc;
5958 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5959
5960 dsc->u.ldst.writeback = writeback;
5961 dsc->u.ldst.rn = rn;
5962
5963 dsc->cleanup = &cleanup_copro_load_store;
5964 }
5965
5966 static int
5967 arm_copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
5968 struct regcache *regs,
5969 struct displaced_step_closure *dsc)
5970 {
5971 unsigned int rn = bits (insn, 16, 19);
5972
5973 if (!insn_references_pc (insn, 0x000f0000ul))
5974 return arm_copy_unmodified (gdbarch, insn, "copro load/store", dsc);
5975
5976 if (debug_displaced)
5977 fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
5978 "load/store insn %.8lx\n", (unsigned long) insn);
5979
5980 dsc->modinsn[0] = insn & 0xfff0ffff;
5981
5982 install_copro_load_store (gdbarch, regs, dsc, bit (insn, 25), rn);
5983
5984 return 0;
5985 }
5986
5987 static int
5988 thumb2_copy_copro_load_store (struct gdbarch *gdbarch, uint16_t insn1,
5989 uint16_t insn2, struct regcache *regs,
5990 struct displaced_step_closure *dsc)
5991 {
5992 unsigned int rn = bits (insn1, 0, 3);
5993
5994 if (rn != ARM_PC_REGNUM)
5995 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
5996 "copro load/store", dsc);
5997
5998 if (debug_displaced)
5999 fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
6000 "load/store insn %.4x%.4x\n", insn1, insn2);
6001
6002 dsc->modinsn[0] = insn1 & 0xfff0;
6003 dsc->modinsn[1] = insn2;
6004 dsc->numinsns = 2;
6005
6006 /* This function is called for copying instruction LDC/LDC2/VLDR, which
6007 doesn't support writeback, so pass 0. */
6008 install_copro_load_store (gdbarch, regs, dsc, 0, rn);
6009
6010 return 0;
6011 }
6012
/* Clean up branch instructions (actually perform the branch, by setting
   PC).  Runs after the scratch NOP has been stepped: evaluate the
   recorded condition against the current flags and, if it holds,
   update LR (for a linked branch) and write the destination to the
   PC using the recorded write style.  */

static void
cleanup_branch (struct gdbarch *gdbarch, struct regcache *regs,
		struct displaced_step_closure *dsc)
{
  uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
  int branch_taken = condition_true (dsc->u.branch.cond, status);
  /* Exchanging branches (BLX etc.) may switch execution state, so they
     write the PC BX-style.  */
  enum pc_write_style write_pc = dsc->u.branch.exchange
				 ? BX_WRITE_PC : BRANCH_WRITE_PC;

  if (!branch_taken)
    return;

  if (dsc->u.branch.link)
    {
      /* The value of LR should be the next insn of current one.  In order
	 not to confuse logic handling later insn `bx lr', if current insn mode
	 is Thumb, the bit 0 of LR value should be set to 1.  */
      ULONGEST next_insn_addr = dsc->insn_addr + dsc->insn_size;

      if (dsc->is_thumb)
	next_insn_addr |= 0x1;

      displaced_write_reg (regs, dsc, ARM_LR_REGNUM, next_insn_addr,
			   CANNOT_WRITE_PC);
    }

  displaced_write_reg (regs, dsc, ARM_PC_REGNUM, dsc->u.branch.dest, write_pc);
}
6044
6045 /* Copy B/BL/BLX instructions with immediate destinations. */
6046
6047 static void
6048 install_b_bl_blx (struct gdbarch *gdbarch, struct regcache *regs,
6049 struct displaced_step_closure *dsc,
6050 unsigned int cond, int exchange, int link, long offset)
6051 {
6052 /* Implement "BL<cond> <label>" as:
6053
6054 Preparation: cond <- instruction condition
6055 Insn: mov r0, r0 (nop)
6056 Cleanup: if (condition true) { r14 <- pc; pc <- label }.
6057
6058 B<cond> similar, but don't set r14 in cleanup. */
6059
6060 dsc->u.branch.cond = cond;
6061 dsc->u.branch.link = link;
6062 dsc->u.branch.exchange = exchange;
6063
6064 dsc->u.branch.dest = dsc->insn_addr;
6065 if (link && exchange)
6066 /* For BLX, offset is computed from the Align (PC, 4). */
6067 dsc->u.branch.dest = dsc->u.branch.dest & 0xfffffffc;
6068
6069 if (dsc->is_thumb)
6070 dsc->u.branch.dest += 4 + offset;
6071 else
6072 dsc->u.branch.dest += 8 + offset;
6073
6074 dsc->cleanup = &cleanup_branch;
6075 }
/* Copy an ARM-state B/BL/BLX immediate for displaced stepping: record
   the decoded destination and replace the instruction with a NOP; the
   branch itself is performed by cleanup_branch.  */
static int
arm_copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
		   struct regcache *regs, struct displaced_step_closure *dsc)
{
  unsigned int cond = bits (insn, 28, 31);
  /* BLX immediate is the only form with condition field 0xf.  */
  int exchange = (cond == 0xf);
  int link = exchange || bit (insn, 24);
  long offset;

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
			"%.8lx\n", (exchange) ? "blx" : (link) ? "bl" : "b",
			(unsigned long) insn);
  if (exchange)
    /* For BLX, set bit 0 of the destination.  The cleanup_branch function will
       then arrange the switch into Thumb mode.  */
    offset = (bits (insn, 0, 23) << 2) | (bit (insn, 24) << 1) | 1;
  else
    offset = bits (insn, 0, 23) << 2;

  /* The 24-bit immediate shifted left two occupies bits 2-25, so bit
     25 is the sign bit: extend it.  */
  if (bit (offset, 25))
    offset = offset | ~0x3ffffff;

  dsc->modinsn[0] = ARM_NOP;

  install_b_bl_blx (gdbarch, regs, dsc, cond, exchange, link, offset);
  return 0;
}
6104
/* Copy a 32-bit Thumb B/BL/BLX for displaced stepping: reassemble the
   branch offset from the scattered immediate fields (S, J1, J2,
   imm10/imm6, imm11), record the destination, and substitute a NOP;
   cleanup_branch performs the actual branch.  */
static int
thumb2_copy_b_bl_blx (struct gdbarch *gdbarch, uint16_t insn1,
		      uint16_t insn2, struct regcache *regs,
		      struct displaced_step_closure *dsc)
{
  int link = bit (insn2, 14);
  /* BLX has bit 12 clear; BL has it set.  */
  int exchange = link && !bit (insn2, 12);
  int cond = INST_AL;
  long offset = 0;
  int j1 = bit (insn2, 13);
  int j2 = bit (insn2, 11);
  int s = sbits (insn1, 10, 10);
  /* I1/I2 as defined for encodings T4/T1: NOT (J ^ S).  */
  int i1 = !(j1 ^ bit (insn1, 10));
  int i2 = !(j2 ^ bit (insn1, 10));

  if (!link && !exchange) /* B */
    {
      offset = (bits (insn2, 0, 10) << 1);
      if (bit (insn2, 12)) /* Encoding T4 */
	{
	  offset |= (bits (insn1, 0, 9) << 12)
		    | (i2 << 22)
		    | (i1 << 23)
		    | (s << 24);
	  cond = INST_AL;
	}
      else /* Encoding T3 */
	{
	  offset |= (bits (insn1, 0, 5) << 12)
		    | (j1 << 18)
		    | (j2 << 19)
		    | (s << 20);
	  /* T3 carries an explicit condition field.  */
	  cond = bits (insn1, 6, 9);
	}
    }
  else
    {
      /* BL/BLX: 24/25-bit offset; BLX drops the low immediate bit and
	 scales by 4 instead of 2.  */
      offset = (bits (insn1, 0, 9) << 12);
      offset |= ((i2 << 22) | (i1 << 23) | (s << 24));
      offset |= exchange ?
	(bits (insn2, 1, 10) << 2) : (bits (insn2, 0, 10) << 1);
    }

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s insn "
			"%.4x %.4x with offset %.8lx\n",
			link ? (exchange) ? "blx" : "bl" : "b",
			insn1, insn2, offset);

  dsc->modinsn[0] = THUMB_NOP;

  install_b_bl_blx (gdbarch, regs, dsc, cond, exchange, link, offset);
  return 0;
}
6159
/* Copy B Thumb instructions (16-bit encodings T1/T2).  The branch is
   replaced by a NOP; the destination and condition are recorded for
   cleanup_branch to act on.  */
static int
thumb_copy_b (struct gdbarch *gdbarch, unsigned short insn,
	      struct displaced_step_closure *dsc)
{
  unsigned int cond = 0;
  int offset = 0;
  unsigned short bit_12_15 = bits (insn, 12, 15);
  CORE_ADDR from = dsc->insn_addr;

  if (bit_12_15 == 0xd)		/* Encoding T1: conditional branch.  */
    {
      /* offset = SignExtend (imm8:0, 32) */
      offset = sbits ((insn << 1), 0, 8);
      cond = bits (insn, 8, 11);
    }
  else if (bit_12_15 == 0xe) /* Encoding T2 */
    {
      /* Unconditional: 11-bit immediate, sign-extended.  */
      offset = sbits ((insn << 1), 0, 11);
      cond = INST_AL;
    }

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: copying b immediate insn %.4x "
			"with offset %d\n", insn, offset);

  dsc->u.branch.cond = cond;
  dsc->u.branch.link = 0;
  dsc->u.branch.exchange = 0;
  /* +4 is the Thumb pipeline offset of the PC.  */
  dsc->u.branch.dest = from + 4 + offset;

  dsc->modinsn[0] = THUMB_NOP;

  dsc->cleanup = &cleanup_branch;

  return 0;
}
6198
6199 /* Copy BX/BLX with register-specified destinations. */
6200
6201 static void
6202 install_bx_blx_reg (struct gdbarch *gdbarch, struct regcache *regs,
6203 struct displaced_step_closure *dsc, int link,
6204 unsigned int cond, unsigned int rm)
6205 {
6206 /* Implement {BX,BLX}<cond> <reg>" as:
6207
6208 Preparation: cond <- instruction condition
6209 Insn: mov r0, r0 (nop)
6210 Cleanup: if (condition true) { r14 <- pc; pc <- dest; }.
6211
6212 Don't set r14 in cleanup for BX. */
6213
6214 dsc->u.branch.dest = displaced_read_reg (regs, dsc, rm);
6215
6216 dsc->u.branch.cond = cond;
6217 dsc->u.branch.link = link;
6218
6219 dsc->u.branch.exchange = 1;
6220
6221 dsc->cleanup = &cleanup_branch;
6222 }
6223
6224 static int
6225 arm_copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
6226 struct regcache *regs, struct displaced_step_closure *dsc)
6227 {
6228 unsigned int cond = bits (insn, 28, 31);
6229 /* BX: x12xxx1x
6230 BLX: x12xxx3x. */
6231 int link = bit (insn, 5);
6232 unsigned int rm = bits (insn, 0, 3);
6233
6234 if (debug_displaced)
6235 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx",
6236 (unsigned long) insn);
6237
6238 dsc->modinsn[0] = ARM_NOP;
6239
6240 install_bx_blx_reg (gdbarch, regs, dsc, link, cond, rm);
6241 return 0;
6242 }
6243
6244 static int
6245 thumb_copy_bx_blx_reg (struct gdbarch *gdbarch, uint16_t insn,
6246 struct regcache *regs,
6247 struct displaced_step_closure *dsc)
6248 {
6249 int link = bit (insn, 7);
6250 unsigned int rm = bits (insn, 3, 6);
6251
6252 if (debug_displaced)
6253 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x",
6254 (unsigned short) insn);
6255
6256 dsc->modinsn[0] = THUMB_NOP;
6257
6258 install_bx_blx_reg (gdbarch, regs, dsc, link, INST_AL, rm);
6259
6260 return 0;
6261 }
6262
6263
6264 /* Copy/cleanup arithmetic/logic instruction with immediate RHS. */
6265
6266 static void
6267 cleanup_alu_imm (struct gdbarch *gdbarch,
6268 struct regcache *regs, struct displaced_step_closure *dsc)
6269 {
6270 ULONGEST rd_val = displaced_read_reg (regs, dsc, 0);
6271 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
6272 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
6273 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
6274 }
6275
6276 static int
6277 arm_copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
6278 struct displaced_step_closure *dsc)
6279 {
6280 unsigned int rn = bits (insn, 16, 19);
6281 unsigned int rd = bits (insn, 12, 15);
6282 unsigned int op = bits (insn, 21, 24);
6283 int is_mov = (op == 0xd);
6284 ULONGEST rd_val, rn_val;
6285
6286 if (!insn_references_pc (insn, 0x000ff000ul))
6287 return arm_copy_unmodified (gdbarch, insn, "ALU immediate", dsc);
6288
6289 if (debug_displaced)
6290 fprintf_unfiltered (gdb_stdlog, "displaced: copying immediate %s insn "
6291 "%.8lx\n", is_mov ? "move" : "ALU",
6292 (unsigned long) insn);
6293
6294 /* Instruction is of form:
6295
6296 <op><cond> rd, [rn,] #imm
6297
6298 Rewrite as:
6299
6300 Preparation: tmp1, tmp2 <- r0, r1;
6301 r0, r1 <- rd, rn
6302 Insn: <op><cond> r0, r1, #imm
6303 Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
6304 */
6305
6306 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
6307 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
6308 rn_val = displaced_read_reg (regs, dsc, rn);
6309 rd_val = displaced_read_reg (regs, dsc, rd);
6310 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
6311 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
6312 dsc->rd = rd;
6313
6314 if (is_mov)
6315 dsc->modinsn[0] = insn & 0xfff00fff;
6316 else
6317 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x10000;
6318
6319 dsc->cleanup = &cleanup_alu_imm;
6320
6321 return 0;
6322 }
6323
6324 static int
6325 thumb2_copy_alu_imm (struct gdbarch *gdbarch, uint16_t insn1,
6326 uint16_t insn2, struct regcache *regs,
6327 struct displaced_step_closure *dsc)
6328 {
6329 unsigned int op = bits (insn1, 5, 8);
6330 unsigned int rn, rm, rd;
6331 ULONGEST rd_val, rn_val;
6332
6333 rn = bits (insn1, 0, 3); /* Rn */
6334 rm = bits (insn2, 0, 3); /* Rm */
6335 rd = bits (insn2, 8, 11); /* Rd */
6336
6337 /* This routine is only called for instruction MOV. */
6338 gdb_assert (op == 0x2 && rn == 0xf);
6339
6340 if (rm != ARM_PC_REGNUM && rd != ARM_PC_REGNUM)
6341 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "ALU imm", dsc);
6342
6343 if (debug_displaced)
6344 fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.4x%.4x\n",
6345 "ALU", insn1, insn2);
6346
6347 /* Instruction is of form:
6348
6349 <op><cond> rd, [rn,] #imm
6350
6351 Rewrite as:
6352
6353 Preparation: tmp1, tmp2 <- r0, r1;
6354 r0, r1 <- rd, rn
6355 Insn: <op><cond> r0, r1, #imm
6356 Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
6357 */
6358
6359 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
6360 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
6361 rn_val = displaced_read_reg (regs, dsc, rn);
6362 rd_val = displaced_read_reg (regs, dsc, rd);
6363 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
6364 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
6365 dsc->rd = rd;
6366
6367 dsc->modinsn[0] = insn1;
6368 dsc->modinsn[1] = ((insn2 & 0xf0f0) | 0x1);
6369 dsc->numinsns = 2;
6370
6371 dsc->cleanup = &cleanup_alu_imm;
6372
6373 return 0;
6374 }
6375
6376 /* Copy/cleanup arithmetic/logic insns with register RHS. */
6377
6378 static void
6379 cleanup_alu_reg (struct gdbarch *gdbarch,
6380 struct regcache *regs, struct displaced_step_closure *dsc)
6381 {
6382 ULONGEST rd_val;
6383 int i;
6384
6385 rd_val = displaced_read_reg (regs, dsc, 0);
6386
6387 for (i = 0; i < 3; i++)
6388 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
6389
6390 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
6391 }
6392
6393 static void
6394 install_alu_reg (struct gdbarch *gdbarch, struct regcache *regs,
6395 struct displaced_step_closure *dsc,
6396 unsigned int rd, unsigned int rn, unsigned int rm)
6397 {
6398 ULONGEST rd_val, rn_val, rm_val;
6399
6400 /* Instruction is of form:
6401
6402 <op><cond> rd, [rn,] rm [, <shift>]
6403
6404 Rewrite as:
6405
6406 Preparation: tmp1, tmp2, tmp3 <- r0, r1, r2;
6407 r0, r1, r2 <- rd, rn, rm
6408 Insn: <op><cond> r0, r1, r2 [, <shift>]
6409 Cleanup: rd <- r0; r0, r1, r2 <- tmp1, tmp2, tmp3
6410 */
6411
6412 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
6413 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
6414 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
6415 rd_val = displaced_read_reg (regs, dsc, rd);
6416 rn_val = displaced_read_reg (regs, dsc, rn);
6417 rm_val = displaced_read_reg (regs, dsc, rm);
6418 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
6419 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
6420 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
6421 dsc->rd = rd;
6422
6423 dsc->cleanup = &cleanup_alu_reg;
6424 }
6425
6426 static int
6427 arm_copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
6428 struct displaced_step_closure *dsc)
6429 {
6430 unsigned int op = bits (insn, 21, 24);
6431 int is_mov = (op == 0xd);
6432
6433 if (!insn_references_pc (insn, 0x000ff00ful))
6434 return arm_copy_unmodified (gdbarch, insn, "ALU reg", dsc);
6435
6436 if (debug_displaced)
6437 fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.8lx\n",
6438 is_mov ? "move" : "ALU", (unsigned long) insn);
6439
6440 if (is_mov)
6441 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x2;
6442 else
6443 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x10002;
6444
6445 install_alu_reg (gdbarch, regs, dsc, bits (insn, 12, 15), bits (insn, 16, 19),
6446 bits (insn, 0, 3));
6447 return 0;
6448 }
6449
6450 static int
6451 thumb_copy_alu_reg (struct gdbarch *gdbarch, uint16_t insn,
6452 struct regcache *regs,
6453 struct displaced_step_closure *dsc)
6454 {
6455 unsigned rn, rm, rd;
6456
6457 rd = bits (insn, 3, 6);
6458 rn = (bit (insn, 7) << 3) | bits (insn, 0, 2);
6459 rm = 2;
6460
6461 if (rd != ARM_PC_REGNUM && rn != ARM_PC_REGNUM)
6462 return thumb_copy_unmodified_16bit (gdbarch, insn, "ALU reg", dsc);
6463
6464 if (debug_displaced)
6465 fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.4x\n",
6466 "ALU", (unsigned short) insn);
6467
6468 dsc->modinsn[0] = ((insn & 0xff00) | 0x08);
6469
6470 install_alu_reg (gdbarch, regs, dsc, rd, rn, rm);
6471
6472 return 0;
6473 }
6474
6475 /* Cleanup/copy arithmetic/logic insns with shifted register RHS. */
6476
6477 static void
6478 cleanup_alu_shifted_reg (struct gdbarch *gdbarch,
6479 struct regcache *regs,
6480 struct displaced_step_closure *dsc)
6481 {
6482 ULONGEST rd_val = displaced_read_reg (regs, dsc, 0);
6483 int i;
6484
6485 for (i = 0; i < 4; i++)
6486 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
6487
6488 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
6489 }
6490
6491 static void
6492 install_alu_shifted_reg (struct gdbarch *gdbarch, struct regcache *regs,
6493 struct displaced_step_closure *dsc,
6494 unsigned int rd, unsigned int rn, unsigned int rm,
6495 unsigned rs)
6496 {
6497 int i;
6498 ULONGEST rd_val, rn_val, rm_val, rs_val;
6499
6500 /* Instruction is of form:
6501
6502 <op><cond> rd, [rn,] rm, <shift> rs
6503
6504 Rewrite as:
6505
6506 Preparation: tmp1, tmp2, tmp3, tmp4 <- r0, r1, r2, r3
6507 r0, r1, r2, r3 <- rd, rn, rm, rs
6508 Insn: <op><cond> r0, r1, r2, <shift> r3
6509 Cleanup: tmp5 <- r0
6510 r0, r1, r2, r3 <- tmp1, tmp2, tmp3, tmp4
6511 rd <- tmp5
6512 */
6513
6514 for (i = 0; i < 4; i++)
6515 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
6516
6517 rd_val = displaced_read_reg (regs, dsc, rd);
6518 rn_val = displaced_read_reg (regs, dsc, rn);
6519 rm_val = displaced_read_reg (regs, dsc, rm);
6520 rs_val = displaced_read_reg (regs, dsc, rs);
6521 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
6522 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
6523 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
6524 displaced_write_reg (regs, dsc, 3, rs_val, CANNOT_WRITE_PC);
6525 dsc->rd = rd;
6526 dsc->cleanup = &cleanup_alu_shifted_reg;
6527 }
6528
6529 static int
6530 arm_copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
6531 struct regcache *regs,
6532 struct displaced_step_closure *dsc)
6533 {
6534 unsigned int op = bits (insn, 21, 24);
6535 int is_mov = (op == 0xd);
6536 unsigned int rd, rn, rm, rs;
6537
6538 if (!insn_references_pc (insn, 0x000fff0ful))
6539 return arm_copy_unmodified (gdbarch, insn, "ALU shifted reg", dsc);
6540
6541 if (debug_displaced)
6542 fprintf_unfiltered (gdb_stdlog, "displaced: copying shifted reg %s insn "
6543 "%.8lx\n", is_mov ? "move" : "ALU",
6544 (unsigned long) insn);
6545
6546 rn = bits (insn, 16, 19);
6547 rm = bits (insn, 0, 3);
6548 rs = bits (insn, 8, 11);
6549 rd = bits (insn, 12, 15);
6550
6551 if (is_mov)
6552 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x302;
6553 else
6554 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x10302;
6555
6556 install_alu_shifted_reg (gdbarch, regs, dsc, rd, rn, rm, rs);
6557
6558 return 0;
6559 }
6560
/* Clean up load instructions.  The rewritten insn left the loaded value
   in r0 (and r1 for a doubleword transfer) and the possibly auto-modified
   base register in r2; move everything back where it belongs.  */

static void
cleanup_load (struct gdbarch *gdbarch, struct regcache *regs,
	      struct displaced_step_closure *dsc)
{
  ULONGEST rt_val, rt_val2 = 0, rn_val;

  /* Pick up the transfer result(s) and the updated base value.  */
  rt_val = displaced_read_reg (regs, dsc, 0);
  if (dsc->u.ldst.xfersize == 8)
    rt_val2 = displaced_read_reg (regs, dsc, 1);
  rn_val = displaced_read_reg (regs, dsc, 2);

  /* Restore the scratch registers saved when the insn was rewritten.  */
  displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
  if (dsc->u.ldst.xfersize > 4)
    displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
  if (!dsc->u.ldst.immed)
    displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);

  /* Handle register writeback.  */
  if (dsc->u.ldst.writeback)
    displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
  /* Put result in right place.  Done last: if RD is the PC, this is the
     point where the load actually branches.  */
  displaced_write_reg (regs, dsc, dsc->rd, rt_val, LOAD_WRITE_PC);
  if (dsc->u.ldst.xfersize == 8)
    displaced_write_reg (regs, dsc, dsc->rd + 1, rt_val2, LOAD_WRITE_PC);
}
6589
/* Clean up store instructions.  Restores the scratch registers used by
   the rewritten insn and performs base-register writeback.  */

static void
cleanup_store (struct gdbarch *gdbarch, struct regcache *regs,
	       struct displaced_step_closure *dsc)
{
  /* r2 holds the (possibly auto-modified) base register value.  */
  ULONGEST rn_val = displaced_read_reg (regs, dsc, 2);

  displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
  if (dsc->u.ldst.xfersize > 4)
    displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
  if (!dsc->u.ldst.immed)
    displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
  /* NOTE(review): the polarity here looks suspicious -- r4 is restored
     when restore_r4 is *clear*, and arm_copy_extra_ld_st never saves
     dsc->tmp[4] although it sets restore_r4 = 0.  Verify against the
     arm_copy_ldr_str_ldrb_strb PC-store path before changing.  */
  if (!dsc->u.ldst.restore_r4)
    displaced_write_reg (regs, dsc, 4, dsc->tmp[4], CANNOT_WRITE_PC);

  /* Writeback.  */
  if (dsc->u.ldst.writeback)
    displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
}
6611
/* Copy "extra" load/store instructions.  These are halfword/doubleword
   transfers, which have a different encoding to byte/word transfers.  */

static int
arm_copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unpriveleged,
		      struct regcache *regs, struct displaced_step_closure *dsc)
{
  unsigned int op1 = bits (insn, 20, 24);
  unsigned int op2 = bits (insn, 5, 6);
  unsigned int rt = bits (insn, 12, 15);
  unsigned int rn = bits (insn, 16, 19);
  unsigned int rm = bits (insn, 0, 3);
  /* Tables indexed by OPCODE (computed below): whether the operation is
     a load, and the transfer size in bytes.  */
  char load[12]     = {0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1};
  char bytesize[12] = {2, 2, 2, 2, 8, 1, 8, 1, 8, 2, 8, 2};
  int immed = (op1 & 0x4) != 0;
  int opcode;
  ULONGEST rt_val, rt_val2 = 0, rn_val, rm_val = 0;

  /* No rewriting needed unless Rt, Rn or Rm is the PC.  */
  if (!insn_references_pc (insn, 0x000ff00ful))
    return arm_copy_unmodified (gdbarch, insn, "extra load/store", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying %sextra load/store "
			"insn %.8lx\n", unpriveleged ? "unpriveleged " : "",
			(unsigned long) insn);

  /* Fold op2 and two op1 bits into a table index; values that would
     underflow below zero indicate a decode the caller should not have
     routed here.  */
  opcode = ((op2 << 2) | (op1 & 0x1) | ((op1 & 0x4) >> 1)) - 4;

  if (opcode < 0)
    internal_error (__FILE__, __LINE__,
		    _("copy_extra_ld_st: instruction decode error"));

  /* Save the scratch registers the rewritten insn will clobber.
     NOTE(review): tmp[4] is never saved here, but cleanup_store (used for
     the store opcodes) restores r4 from dsc->tmp[4] when restore_r4 is 0,
     which is what is set below -- verify.  */
  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
  dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
  if (!immed)
    dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);

  /* Fetch the operand values; doubleword transfers also use Rt+1.  */
  rt_val = displaced_read_reg (regs, dsc, rt);
  if (bytesize[opcode] == 8)
    rt_val2 = displaced_read_reg (regs, dsc, rt + 1);
  rn_val = displaced_read_reg (regs, dsc, rn);
  if (!immed)
    rm_val = displaced_read_reg (regs, dsc, rm);

  /* Park operand values in the fixed scratch registers r0-r3.  */
  displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
  if (bytesize[opcode] == 8)
    displaced_write_reg (regs, dsc, 1, rt_val2, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
  if (!immed)
    displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);

  dsc->rd = rt;
  dsc->u.ldst.xfersize = bytesize[opcode];
  dsc->u.ldst.rn = rn;
  dsc->u.ldst.immed = immed;
  /* Writeback happens for post-indexed (P == 0) or pre-indexed with W.  */
  dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
  dsc->u.ldst.restore_r4 = 0;

  if (immed)
    /* {ldr,str}<width><cond> rt, [rt2,] [rn, #imm]
       ->
       {ldr,str}<width><cond> r0, [r1,] [r2, #imm].  */
    dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
  else
    /* {ldr,str}<width><cond> rt, [rt2,] [rn, +/-rm]
       ->
       {ldr,str}<width><cond> r0, [r1,] [r2, +/-r3].  */
    dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;

  dsc->cleanup = load[opcode] ? &cleanup_load : &cleanup_store;

  return 0;
}
6686
/* Copy byte/half word/word loads and stores.  Common worker that saves
   the scratch registers, parks the operand values in r0/r2/r3 and hooks
   up the load/store cleanup.  The caller is responsible for building
   dsc->modinsn[].  */

static void
install_load_store (struct gdbarch *gdbarch, struct regcache *regs,
		    struct displaced_step_closure *dsc, int load,
		    int immed, int writeback, int size, int usermode,
		    int rt, int rm, int rn)
{
  ULONGEST rt_val, rn_val, rm_val = 0;

  /* Save scratch registers.  NOTE(review): tmp[1] is not saved here,
     though the cleanup routines restore it when xfersize > 4; the callers
     in this file pass size <= 4 on this path -- confirm for new callers.
     USERMODE is currently unused in this function.  */
  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
  if (!immed)
    dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
  if (!load)
    /* Stores may clobber r4 via the PC-store sequence below.  */
    dsc->tmp[4] = displaced_read_reg (regs, dsc, 4);

  rt_val = displaced_read_reg (regs, dsc, rt);
  rn_val = displaced_read_reg (regs, dsc, rn);
  if (!immed)
    rm_val = displaced_read_reg (regs, dsc, rm);

  /* Park the operands in the fixed scratch registers used by the
     rewritten instruction: r0 <- Rt, r2 <- Rn, r3 <- Rm.  */
  displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
  if (!immed)
    displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
  dsc->rd = rt;
  dsc->u.ldst.xfersize = size;
  dsc->u.ldst.rn = rn;
  dsc->u.ldst.immed = immed;
  dsc->u.ldst.writeback = writeback;

  /* To write PC we can do:

     Before this sequence of instructions:
     r0 is the PC value got from displaced_read_reg, so r0 = from + 8;
     r2 is the Rn value got from dispalced_read_reg.

     Insn1: push {pc} Write address of STR instruction + offset on stack
     Insn2: pop {r4} Read it back from stack, r4 = addr(Insn1) + offset
     Insn3: sub r4, r4, pc r4 = addr(Insn1) + offset - pc
				= addr(Insn1) + offset - addr(Insn3) - 8
				= offset - 16
     Insn4: add r4, r4, #8 r4 = offset - 8
     Insn5: add r0, r0, r4 r0 = from + 8 + offset - 8
				= from + offset
     Insn6: str r0, [r2, #imm] (or str r0, [r2, r3])

     Otherwise we don't know what value to write for PC, since the offset is
     architecture-dependent (sometimes PC+8, sometimes PC+12).  More details
     of this can be found in Section "Saving from r15" in
     http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0204g/Cihbjifh.html */

  dsc->cleanup = load ? &cleanup_load : &cleanup_store;
}
6742
6743
/* Copy a Thumb-2 PC-relative (literal) load of SIZE bytes: rewrite it as
   a register-offset load through scratch registers, since the executed
   copy runs at a different address than the original insn.  */
static int
thumb2_copy_load_literal (struct gdbarch *gdbarch, uint16_t insn1,
			  uint16_t insn2, struct regcache *regs,
			  struct displaced_step_closure *dsc, int size)
{
  unsigned int u_bit = bit (insn1, 7);	/* Add (1) or subtract (0) offset.  */
  unsigned int rt = bits (insn2, 12, 15);
  int imm12 = bits (insn2, 0, 11);
  ULONGEST pc_val;

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: copying ldr pc (0x%x) R%d %c imm12 %.4x\n",
			(unsigned int) dsc->insn_addr, rt, u_bit ? '+' : '-',
			imm12);

  /* U == 0 means the offset is subtracted from the base.  */
  if (!u_bit)
    imm12 = -1 * imm12;

  /* Rewrite instruction LDR Rt imm12 into:

     Prepare: tmp[0] <- r0, tmp[2] <- r2, tmp[3] <- r3,
	      r2 <- pc, r3 <- imm12

     LDR R0, R2, R3,

     Cleanup: rt <- r0, r0 <- tmp[0], r2 <- tmp[2], r3 <- tmp[3].  */


  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
  dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);

  pc_val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);

  /* Literal loads use the word-aligned PC as the base.  */
  pc_val = pc_val & 0xfffffffc;

  displaced_write_reg (regs, dsc, 2, pc_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 3, imm12, CANNOT_WRITE_PC);

  dsc->rd = rt;

  /* No writeback, so dsc->u.ldst.rn is not needed by cleanup_load.  */
  dsc->u.ldst.xfersize = size;
  dsc->u.ldst.immed = 0;
  dsc->u.ldst.writeback = 0;
  dsc->u.ldst.restore_r4 = 0;

  /* LDR R0, R2, R3 */
  dsc->modinsn[0] = 0xf852;
  dsc->modinsn[1] = 0x3;
  dsc->numinsns = 2;

  dsc->cleanup = &cleanup_load;

  return 0;
}
6799
/* Copy a Thumb-2 word load with register or immediate offset, retargeting
   Rt/Rn at scratch registers r0/r2 (and Rm at r3 for register form).  */
static int
thumb2_copy_load_reg_imm (struct gdbarch *gdbarch, uint16_t insn1,
			  uint16_t insn2, struct regcache *regs,
			  struct displaced_step_closure *dsc,
			  int writeback, int immed)
{
  unsigned int rt = bits (insn2, 12, 15);
  unsigned int rn = bits (insn1, 0, 3);
  unsigned int rm = bits (insn2, 0, 3);  /* Only valid if !immed.  */
  /* In LDR (register), there is also a register Rm, which is not allowed to
     be PC, so we don't have to check it.  */

  /* Only rewrite when the PC is actually involved.  */
  if (rt != ARM_PC_REGNUM && rn != ARM_PC_REGNUM)
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "load",
					dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: copying ldr r%d [r%d] insn %.4x%.4x\n",
			rt, rn, insn1, insn2);

  /* load = 1, size = 4 (word), usermode = 0.  */
  install_load_store (gdbarch, regs, dsc, 1, immed, writeback, 4,
		      0, rt, rm, rn);

  dsc->u.ldst.restore_r4 = 0;

  if (immed)
    /* ldr[b]<cond> rt, [rn, #imm], etc.
       ->
       ldr[b]<cond> r0, [r2, #imm].  */
    {
      dsc->modinsn[0] = (insn1 & 0xfff0) | 0x2;
      dsc->modinsn[1] = insn2 & 0x0fff;
    }
  else
    /* ldr[b]<cond> rt, [rn, rm], etc.
       ->
       ldr[b]<cond> r0, [r2, r3].  */
    {
      dsc->modinsn[0] = (insn1 & 0xfff0) | 0x2;
      dsc->modinsn[1] = (insn2 & 0x0ff0) | 0x3;
    }

  dsc->numinsns = 2;

  return 0;
}
6847
6848
/* Copy ARM byte/word loads and stores (LDR/STR/LDRB/STRB and their
   unprivileged 'T' variants).  Loads and non-PC stores are rewritten to
   use scratch registers; a store *of* the PC needs a 6-insn sequence to
   materialize the correct PC value (see install_load_store's comment).  */
static int
arm_copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
			    struct regcache *regs,
			    struct displaced_step_closure *dsc,
			    int load, int size, int usermode)
{
  int immed = !bit (insn, 25);
  /* Post-indexed (P == 0) or pre-indexed with W set both write back.  */
  int writeback = (bit (insn, 24) == 0 || bit (insn, 21) != 0);
  unsigned int rt = bits (insn, 12, 15);
  unsigned int rn = bits (insn, 16, 19);
  unsigned int rm = bits (insn, 0, 3);  /* Only valid if !immed.  */

  /* Run unmodified unless Rt, Rn or Rm is the PC.  */
  if (!insn_references_pc (insn, 0x000ff00ful))
    return arm_copy_unmodified (gdbarch, insn, "load/store", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: copying %s%s r%d [r%d] insn %.8lx\n",
			load ? (size == 1 ? "ldrb" : "ldr")
			     : (size == 1 ? "strb" : "str"), usermode ? "t" : "",
			rt, rn,
			(unsigned long) insn);

  install_load_store (gdbarch, regs, dsc, load, immed, writeback, size,
		      usermode, rt, rm, rn);

  if (load || rt != ARM_PC_REGNUM)
    {
      dsc->u.ldst.restore_r4 = 0;

      if (immed)
	/* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
	   ->
	   {ldr,str}[b]<cond> r0, [r2, #imm].  */
	dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
      else
	/* {ldr,str}[b]<cond> rt, [rn, rm], etc.
	   ->
	   {ldr,str}[b]<cond> r0, [r2, r3].  */
	dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
    }
  else
    {
      /* We need to use r4 as scratch.  Make sure it's restored afterwards.  */
      dsc->u.ldst.restore_r4 = 1;
      /* Compute "from + offset" in r0; offset is the architecture-defined
	 store-PC bias discovered at run time by the push/pop pair.  */
      dsc->modinsn[0] = 0xe92d8000;  /* push {pc} */
      dsc->modinsn[1] = 0xe8bd0010;  /* pop {r4} */
      dsc->modinsn[2] = 0xe044400f;  /* sub r4, r4, pc.  */
      dsc->modinsn[3] = 0xe2844008;  /* add r4, r4, #8.  */
      dsc->modinsn[4] = 0xe0800004;  /* add r0, r0, r4.  */

      /* As above.  */
      if (immed)
	dsc->modinsn[5] = (insn & 0xfff00fff) | 0x20000;
      else
	dsc->modinsn[5] = (insn & 0xfff00ff0) | 0x20003;

      dsc->numinsns = 6;
    }

  dsc->cleanup = load ? &cleanup_load : &cleanup_store;

  return 0;
}
6913
/* Cleanup LDM instructions with fully-populated register list.  This is an
   unfortunate corner case: it's impossible to implement correctly by modifying
   the instruction.  The issue is as follows: we have an instruction,

   ldm rN, {r0-r15}

   which we must rewrite to avoid loading PC.  A possible solution would be to
   do the load in two halves, something like (with suitable cleanup
   afterwards):

   mov r8, rN
   ldm[id][ab] r8!, {r0-r7}
   str r7, <temp>
   ldm[id][ab] r8, {r7-r14}
   <bkpt>

   but at present there's no suitable place for <temp>, since the scratch space
   is overwritten before the cleanup routine is called.  For now, we simply
   emulate the instruction.  */

static void
cleanup_block_load_all (struct gdbarch *gdbarch, struct regcache *regs,
			struct displaced_step_closure *dsc)
{
  int inc = dsc->u.block.increment;
  /* Pre/post-increment/decrement adjustments applied around each word.  */
  int bump_before = dsc->u.block.before ? (inc ? 4 : -4) : 0;
  int bump_after = dsc->u.block.before ? 0 : (inc ? 4 : -4);
  uint32_t regmask = dsc->u.block.regmask;
  /* Incrementing transfers consume registers from r0 upwards;
     decrementing ones from r15 downwards.  */
  int regno = inc ? 0 : 15;
  CORE_ADDR xfer_addr = dsc->u.block.xfer_addr;
  /* "ldm rn, {...pc}^" (user-bank transfer loading PC) is an exception
     return; we cannot emulate that sensibly.  */
  int exception_return = dsc->u.block.load && dsc->u.block.user
			 && (regmask & 0x8000) != 0;
  uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
  int do_transfer = condition_true (dsc->u.block.cond, status);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (!do_transfer)
    return;

  /* If the instruction is ldm rN, {...pc}^, I don't think there's anything
     sensible we can do here.  Complain loudly.  */
  if (exception_return)
    error (_("Cannot single-step exception return"));

  /* We don't handle any stores here for now.  */
  gdb_assert (dsc->u.block.load != 0);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: emulating block transfer: "
			"%s %s %s\n", dsc->u.block.load ? "ldm" : "stm",
			dsc->u.block.increment ? "inc" : "dec",
			dsc->u.block.before ? "before" : "after");

  /* Emulate the transfer one register at a time, in memory order.  */
  while (regmask)
    {
      uint32_t memword;

      /* Advance to the next register named in the (remaining) mask.  */
      if (inc)
	while (regno <= ARM_PC_REGNUM && (regmask & (1 << regno)) == 0)
	  regno++;
      else
	while (regno >= 0 && (regmask & (1 << regno)) == 0)
	  regno--;

      xfer_addr += bump_before;

      memword = read_memory_unsigned_integer (xfer_addr, 4, byte_order);
      displaced_write_reg (regs, dsc, regno, memword, LOAD_WRITE_PC);

      xfer_addr += bump_after;

      regmask &= ~(1 << regno);
    }

  /* Emulate base-register writeback.  */
  if (dsc->u.block.writeback)
    displaced_write_reg (regs, dsc, dsc->u.block.rn, xfer_addr,
			 CANNOT_WRITE_PC);
}
6992
/* Clean up an STM which included the PC in the register list.  The copy
   executed out of line stored the *displaced* PC; compute where that word
   landed and patch it to the value the original instruction would have
   stored.  */

static void
cleanup_block_store_pc (struct gdbarch *gdbarch, struct regcache *regs,
			struct displaced_step_closure *dsc)
{
  uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
  int store_executed = condition_true (dsc->u.block.cond, status);
  CORE_ADDR pc_stored_at, transferred_regs = bitcount (dsc->u.block.regmask);
  CORE_ADDR stm_insn_addr;
  uint32_t pc_val;
  long offset;
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  /* If condition code fails, there's nothing else to do.  */
  if (!store_executed)
    return;

  /* Locate the memory slot that received the PC: it is the highest
     address of the transfer (PC is the highest-numbered register).  */
  if (dsc->u.block.increment)
    {
      pc_stored_at = dsc->u.block.xfer_addr + 4 * transferred_regs;

      if (dsc->u.block.before)
	pc_stored_at += 4;
    }
  else
    {
      pc_stored_at = dsc->u.block.xfer_addr;

      if (dsc->u.block.before)
	pc_stored_at -= 4;
    }

  /* The stored value is scratch-area PC plus the architecture-defined
     store-PC bias; recover that bias as OFFSET.  */
  pc_val = read_memory_unsigned_integer (pc_stored_at, 4, byte_order);
  stm_insn_addr = dsc->scratch_base;
  offset = pc_val - stm_insn_addr;

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: detected PC offset %.8lx for "
			"STM instruction\n", offset);

  /* Rewrite the stored PC to the proper value for the non-displaced original
     instruction.  */
  write_memory_unsigned_integer (pc_stored_at, 4, byte_order,
				 dsc->insn_addr + offset);
}
7039
/* Clean up an LDM which includes the PC in the register list.  We clumped all
   the registers in the transferred list into a contiguous range r0...rX (to
   avoid loading PC directly and losing control of the debugged program), so we
   must undo that here.  */

static void
cleanup_block_load_pc (struct gdbarch *gdbarch,
		       struct regcache *regs,
		       struct displaced_step_closure *dsc)
{
  uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
  int load_executed = condition_true (dsc->u.block.cond, status);
  unsigned int mask = dsc->u.block.regmask, write_reg = ARM_PC_REGNUM;
  unsigned int regs_loaded = bitcount (mask);
  unsigned int num_to_shuffle = regs_loaded, clobbered;

  /* The method employed here will fail if the register list is fully populated
     (we need to avoid loading PC directly).  */
  gdb_assert (num_to_shuffle < 16);

  if (!load_executed)
    return;

  /* r0..r(num_to_shuffle-1) hold loaded values that may belong elsewhere.  */
  clobbered = (1 << num_to_shuffle) - 1;

  /* Walk downwards from r15; each register named in MASK receives the
     next-highest loaded value (they were loaded in ascending order).  */
  while (num_to_shuffle > 0)
    {
      if ((mask & (1 << write_reg)) != 0)
	{
	  unsigned int read_reg = num_to_shuffle - 1;

	  if (read_reg != write_reg)
	    {
	      ULONGEST rval = displaced_read_reg (regs, dsc, read_reg);
	      displaced_write_reg (regs, dsc, write_reg, rval, LOAD_WRITE_PC);
	      if (debug_displaced)
		fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: move "
				    "loaded register r%d to r%d\n"), read_reg,
				    write_reg);
	    }
	  else if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: register "
				"r%d already in the right place\n"),
				write_reg);

	  clobbered &= ~(1 << write_reg);

	  num_to_shuffle--;
	}

      write_reg--;
    }

  /* Restore any registers we scribbled over.  */
  for (write_reg = 0; clobbered != 0; write_reg++)
    {
      if ((clobbered & (1 << write_reg)) != 0)
	{
	  displaced_write_reg (regs, dsc, write_reg, dsc->tmp[write_reg],
			       CANNOT_WRITE_PC);
	  if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: restored "
				"clobbered register r%d\n"), write_reg);
	  clobbered &= ~(1 << write_reg);
	}
    }

  /* Perform register writeback manually.  */
  if (dsc->u.block.writeback)
    {
      ULONGEST new_rn_val = dsc->u.block.xfer_addr;

      if (dsc->u.block.increment)
	new_rn_val += regs_loaded * 4;
      else
	new_rn_val -= regs_loaded * 4;

      displaced_write_reg (regs, dsc, dsc->u.block.rn, new_rn_val,
			   CANNOT_WRITE_PC);
    }
}
7121
7122 /* Handle ldm/stm, apart from some tricky cases which are unlikely to occur
7123 in user-level code (in particular exception return, ldm rn, {...pc}^). */
7124
7125 static int
7126 arm_copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn,
7127 struct regcache *regs,
7128 struct displaced_step_closure *dsc)
7129 {
7130 int load = bit (insn, 20);
7131 int user = bit (insn, 22);
7132 int increment = bit (insn, 23);
7133 int before = bit (insn, 24);
7134 int writeback = bit (insn, 21);
7135 int rn = bits (insn, 16, 19);
7136
7137 /* Block transfers which don't mention PC can be run directly
7138 out-of-line. */
7139 if (rn != ARM_PC_REGNUM && (insn & 0x8000) == 0)
7140 return arm_copy_unmodified (gdbarch, insn, "ldm/stm", dsc);
7141
7142 if (rn == ARM_PC_REGNUM)
7143 {
7144 warning (_("displaced: Unpredictable LDM or STM with "
7145 "base register r15"));
7146 return arm_copy_unmodified (gdbarch, insn, "unpredictable ldm/stm", dsc);
7147 }
7148
7149 if (debug_displaced)
7150 fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
7151 "%.8lx\n", (unsigned long) insn);
7152
7153 dsc->u.block.xfer_addr = displaced_read_reg (regs, dsc, rn);
7154 dsc->u.block.rn = rn;
7155
7156 dsc->u.block.load = load;
7157 dsc->u.block.user = user;
7158 dsc->u.block.increment = increment;
7159 dsc->u.block.before = before;
7160 dsc->u.block.writeback = writeback;
7161 dsc->u.block.cond = bits (insn, 28, 31);
7162
7163 dsc->u.block.regmask = insn & 0xffff;
7164
7165 if (load)
7166 {
7167 if ((insn & 0xffff) == 0xffff)
7168 {
7169 /* LDM with a fully-populated register list. This case is
7170 particularly tricky. Implement for now by fully emulating the
7171 instruction (which might not behave perfectly in all cases, but
7172 these instructions should be rare enough for that not to matter
7173 too much). */
7174 dsc->modinsn[0] = ARM_NOP;
7175
7176 dsc->cleanup = &cleanup_block_load_all;
7177 }
7178 else
7179 {
7180 /* LDM of a list of registers which includes PC. Implement by
7181 rewriting the list of registers to be transferred into a
7182 contiguous chunk r0...rX before doing the transfer, then shuffling
7183 registers into the correct places in the cleanup routine. */
7184 unsigned int regmask = insn & 0xffff;
7185 unsigned int num_in_list = bitcount (regmask), new_regmask, bit = 1;
7186 unsigned int to = 0, from = 0, i, new_rn;
7187
7188 for (i = 0; i < num_in_list; i++)
7189 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
7190
7191 /* Writeback makes things complicated. We need to avoid clobbering
7192 the base register with one of the registers in our modified
7193 register list, but just using a different register can't work in
7194 all cases, e.g.:
7195
7196 ldm r14!, {r0-r13,pc}
7197
7198 which would need to be rewritten as:
7199
7200 ldm rN!, {r0-r14}
7201
7202 but that can't work, because there's no free register for N.
7203
7204 Solve this by turning off the writeback bit, and emulating
7205 writeback manually in the cleanup routine. */
7206
7207 if (writeback)
7208 insn &= ~(1 << 21);
7209
7210 new_regmask = (1 << num_in_list) - 1;
7211
7212 if (debug_displaced)
7213 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
7214 "{..., pc}: original reg list %.4x, modified "
7215 "list %.4x\n"), rn, writeback ? "!" : "",
7216 (int) insn & 0xffff, new_regmask);
7217
7218 dsc->modinsn[0] = (insn & ~0xffff) | (new_regmask & 0xffff);
7219
7220 dsc->cleanup = &cleanup_block_load_pc;
7221 }
7222 }
7223 else
7224 {
7225 /* STM of a list of registers which includes PC. Run the instruction
7226 as-is, but out of line: this will store the wrong value for the PC,
7227 so we must manually fix up the memory in the cleanup routine.
7228 Doing things this way has the advantage that we can auto-detect
7229 the offset of the PC write (which is architecture-dependent) in
7230 the cleanup routine. */
7231 dsc->modinsn[0] = insn;
7232
7233 dsc->cleanup = &cleanup_block_store_pc;
7234 }
7235
7236 return 0;
7237 }
7238
7239 static int
7240 thumb2_copy_block_xfer (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
7241 struct regcache *regs,
7242 struct displaced_step_closure *dsc)
7243 {
7244 int rn = bits (insn1, 0, 3);
7245 int load = bit (insn1, 4);
7246 int writeback = bit (insn1, 5);
7247
7248 /* Block transfers which don't mention PC can be run directly
7249 out-of-line. */
7250 if (rn != ARM_PC_REGNUM && (insn2 & 0x8000) == 0)
7251 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "ldm/stm", dsc);
7252
7253 if (rn == ARM_PC_REGNUM)
7254 {
7255 warning (_("displaced: Unpredictable LDM or STM with "
7256 "base register r15"));
7257 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
7258 "unpredictable ldm/stm", dsc);
7259 }
7260
7261 if (debug_displaced)
7262 fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
7263 "%.4x%.4x\n", insn1, insn2);
7264
7265 /* Clear bit 13, since it should be always zero. */
7266 dsc->u.block.regmask = (insn2 & 0xdfff);
7267 dsc->u.block.rn = rn;
7268
7269 dsc->u.block.load = load;
7270 dsc->u.block.user = 0;
7271 dsc->u.block.increment = bit (insn1, 7);
7272 dsc->u.block.before = bit (insn1, 8);
7273 dsc->u.block.writeback = writeback;
7274 dsc->u.block.cond = INST_AL;
7275 dsc->u.block.xfer_addr = displaced_read_reg (regs, dsc, rn);
7276
7277 if (load)
7278 {
7279 if (dsc->u.block.regmask == 0xffff)
7280 {
7281 /* This branch is impossible to happen. */
7282 gdb_assert (0);
7283 }
7284 else
7285 {
7286 unsigned int regmask = dsc->u.block.regmask;
7287 unsigned int num_in_list = bitcount (regmask), new_regmask, bit = 1;
7288 unsigned int to = 0, from = 0, i, new_rn;
7289
7290 for (i = 0; i < num_in_list; i++)
7291 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
7292
7293 if (writeback)
7294 insn1 &= ~(1 << 5);
7295
7296 new_regmask = (1 << num_in_list) - 1;
7297
7298 if (debug_displaced)
7299 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
7300 "{..., pc}: original reg list %.4x, modified "
7301 "list %.4x\n"), rn, writeback ? "!" : "",
7302 (int) dsc->u.block.regmask, new_regmask);
7303
7304 dsc->modinsn[0] = insn1;
7305 dsc->modinsn[1] = (new_regmask & 0xffff);
7306 dsc->numinsns = 2;
7307
7308 dsc->cleanup = &cleanup_block_load_pc;
7309 }
7310 }
7311 else
7312 {
7313 dsc->modinsn[0] = insn1;
7314 dsc->modinsn[1] = insn2;
7315 dsc->numinsns = 2;
7316 dsc->cleanup = &cleanup_block_store_pc;
7317 }
7318 return 0;
7319 }
7320
7321 /* Cleanup/copy SVC (SWI) instructions. These two functions are overridden
7322 for Linux, where some SVC instructions must be treated specially. */
7323
7324 static void
7325 cleanup_svc (struct gdbarch *gdbarch, struct regcache *regs,
7326 struct displaced_step_closure *dsc)
7327 {
7328 CORE_ADDR resume_addr = dsc->insn_addr + dsc->insn_size;
7329
7330 if (debug_displaced)
7331 fprintf_unfiltered (gdb_stdlog, "displaced: cleanup for svc, resume at "
7332 "%.8lx\n", (unsigned long) resume_addr);
7333
7334 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, resume_addr, BRANCH_WRITE_PC);
7335 }
7336
7337
7338 /* Common copy routine for svc instruciton. */
7339
7340 static int
7341 install_svc (struct gdbarch *gdbarch, struct regcache *regs,
7342 struct displaced_step_closure *dsc)
7343 {
7344 /* Preparation: none.
7345 Insn: unmodified svc.
7346 Cleanup: pc <- insn_addr + insn_size. */
7347
7348 /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
7349 instruction. */
7350 dsc->wrote_to_pc = 1;
7351
7352 /* Allow OS-specific code to override SVC handling. */
7353 if (dsc->u.svc.copy_svc_os)
7354 return dsc->u.svc.copy_svc_os (gdbarch, regs, dsc);
7355 else
7356 {
7357 dsc->cleanup = &cleanup_svc;
7358 return 0;
7359 }
7360 }
7361
7362 static int
7363 arm_copy_svc (struct gdbarch *gdbarch, uint32_t insn,
7364 struct regcache *regs, struct displaced_step_closure *dsc)
7365 {
7366
7367 if (debug_displaced)
7368 fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.8lx\n",
7369 (unsigned long) insn);
7370
7371 dsc->modinsn[0] = insn;
7372
7373 return install_svc (gdbarch, regs, dsc);
7374 }
7375
7376 static int
7377 thumb_copy_svc (struct gdbarch *gdbarch, uint16_t insn,
7378 struct regcache *regs, struct displaced_step_closure *dsc)
7379 {
7380
7381 if (debug_displaced)
7382 fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.4x\n",
7383 insn);
7384
7385 dsc->modinsn[0] = insn;
7386
7387 return install_svc (gdbarch, regs, dsc);
7388 }
7389
7390 /* Copy undefined instructions. */
7391
7392 static int
7393 arm_copy_undef (struct gdbarch *gdbarch, uint32_t insn,
7394 struct displaced_step_closure *dsc)
7395 {
7396 if (debug_displaced)
7397 fprintf_unfiltered (gdb_stdlog,
7398 "displaced: copying undefined insn %.8lx\n",
7399 (unsigned long) insn);
7400
7401 dsc->modinsn[0] = insn;
7402
7403 return 0;
7404 }
7405
7406 static int
7407 thumb_32bit_copy_undef (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
7408 struct displaced_step_closure *dsc)
7409 {
7410
7411 if (debug_displaced)
7412 fprintf_unfiltered (gdb_stdlog, "displaced: copying undefined insn "
7413 "%.4x %.4x\n", (unsigned short) insn1,
7414 (unsigned short) insn2);
7415
7416 dsc->modinsn[0] = insn1;
7417 dsc->modinsn[1] = insn2;
7418 dsc->numinsns = 2;
7419
7420 return 0;
7421 }
7422
7423 /* Copy unpredictable instructions. */
7424
7425 static int
7426 arm_copy_unpred (struct gdbarch *gdbarch, uint32_t insn,
7427 struct displaced_step_closure *dsc)
7428 {
7429 if (debug_displaced)
7430 fprintf_unfiltered (gdb_stdlog, "displaced: copying unpredictable insn "
7431 "%.8lx\n", (unsigned long) insn);
7432
7433 dsc->modinsn[0] = insn;
7434
7435 return 0;
7436 }
7437
7438 /* The decode_* functions are instruction decoding helpers. They mostly follow
7439 the presentation in the ARM ARM. */
7440
/* Dispatch the ARM "miscellaneous / memory hint / advanced SIMD" encoding
   space (cond field == 0b1111, bit 27 clear) to the appropriate copy
   routine.  The bit tests below follow the decode tables in the ARM ARM;
   OP1 is insn[26:20], OP2 is insn[7:4], RN is insn[19:16].  */

static int
arm_decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
			      struct regcache *regs,
			      struct displaced_step_closure *dsc)
{
  unsigned int op1 = bits (insn, 20, 26), op2 = bits (insn, 4, 7);
  unsigned int rn = bits (insn, 16, 19);

  if (op1 == 0x10 && (op2 & 0x2) == 0x0 && (rn & 0xe) == 0x0)
    return arm_copy_unmodified (gdbarch, insn, "cps", dsc);
  else if (op1 == 0x10 && op2 == 0x0 && (rn & 0xe) == 0x1)
    return arm_copy_unmodified (gdbarch, insn, "setend", dsc);
  else if ((op1 & 0x60) == 0x20)
    return arm_copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
  else if ((op1 & 0x71) == 0x40)
    return arm_copy_unmodified (gdbarch, insn, "neon elt/struct load/store",
				dsc);
  else if ((op1 & 0x77) == 0x41)
    return arm_copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
  else if ((op1 & 0x77) == 0x45)
    return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pli.  */
  else if ((op1 & 0x77) == 0x51)
    {
      /* pld/pldw with rn == pc is unpredictable.  */
      if (rn != 0xf)
	return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
      else
	return arm_copy_unpred (gdbarch, insn, dsc);
    }
  else if ((op1 & 0x77) == 0x55)
    return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
  else if (op1 == 0x57)
    switch (op2)
      {
      /* Memory barriers: all safe to run unmodified out of line.  */
      case 0x1: return arm_copy_unmodified (gdbarch, insn, "clrex", dsc);
      case 0x4: return arm_copy_unmodified (gdbarch, insn, "dsb", dsc);
      case 0x5: return arm_copy_unmodified (gdbarch, insn, "dmb", dsc);
      case 0x6: return arm_copy_unmodified (gdbarch, insn, "isb", dsc);
      default: return arm_copy_unpred (gdbarch, insn, dsc);
      }
  else if ((op1 & 0x63) == 0x43)
    return arm_copy_unpred (gdbarch, insn, dsc);
  else if ((op2 & 0x1) == 0x0)
    switch (op1 & ~0x80)
      {
      case 0x61:
	return arm_copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
      case 0x65:
	return arm_copy_preload_reg (gdbarch, insn, regs, dsc);  /* pli reg.  */
      case 0x71: case 0x75:
        /* pld/pldw reg.  */
	return arm_copy_preload_reg (gdbarch, insn, regs, dsc);
      case 0x63: case 0x67: case 0x73: case 0x77:
	return arm_copy_unpred (gdbarch, insn, dsc);
      default:
	return arm_copy_undef (gdbarch, insn, dsc);
      }
  else
    return arm_copy_undef (gdbarch, insn, dsc);  /* Probably unreachable.  */
}
7500
/* Dispatch ARM unconditional instructions (cond field == 0b1111).  Bit 27
   clear means the misc/memhint/neon space; otherwise the switch selects on
   insn bits 26:24 concatenated with bit 20, per the ARM ARM decode table.  */

static int
arm_decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
			  struct regcache *regs,
			  struct displaced_step_closure *dsc)
{
  if (bit (insn, 27) == 0)
    return arm_decode_misc_memhint_neon (gdbarch, insn, regs, dsc);
  /* Switch on bits: 0bxxxxx321xxx0xxxxxxxxxxxxxxxxxxxx.  */
  else switch (((insn & 0x7000000) >> 23) | ((insn & 0x100000) >> 20))
    {
    case 0x0: case 0x2:
      return arm_copy_unmodified (gdbarch, insn, "srs", dsc);

    case 0x1: case 0x3:
      return arm_copy_unmodified (gdbarch, insn, "rfe", dsc);

    case 0x4: case 0x5: case 0x6: case 0x7:
      /* BLX (immediate) lives in the unconditional space.  */
      return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);

    case 0x8:
      switch ((insn & 0xe00000) >> 21)
	{
	case 0x1: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7:
	  /* stc/stc2.  */
	  return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);

	case 0x2:
	  return arm_copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);

	default:
	  return arm_copy_undef (gdbarch, insn, dsc);
	}

    case 0x9:
      {
	/* ldc/ldc2 immediate vs literal forms differ on rn == pc.  */
	int rn_f = (bits (insn, 16, 19) == 0xf);
	switch ((insn & 0xe00000) >> 21)
	  {
	  case 0x1: case 0x3:
	    /* ldc/ldc2 imm (undefined for rn == pc).  */
	    return rn_f ? arm_copy_undef (gdbarch, insn, dsc)
			: arm_copy_copro_load_store (gdbarch, insn, regs, dsc);

	  case 0x2:
	    return arm_copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);

	  case 0x4: case 0x5: case 0x6: case 0x7:
	    /* ldc/ldc2 lit (undefined for rn != pc).  */
	    return rn_f ? arm_copy_copro_load_store (gdbarch, insn, regs, dsc)
			: arm_copy_undef (gdbarch, insn, dsc);

	  default:
	    return arm_copy_undef (gdbarch, insn, dsc);
	  }
      }

    case 0xa:
      return arm_copy_unmodified (gdbarch, insn, "stc/stc2", dsc);

    case 0xb:
      if (bits (insn, 16, 19) == 0xf)
	/* ldc/ldc2 lit.  */
	return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0xc:
      /* Bit 4 distinguishes coprocessor register transfer from cdp.  */
      if (bit (insn, 4))
	return arm_copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
      else
	return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);

    case 0xd:
      if (bit (insn, 4))
	return arm_copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
      else
	return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);

    default:
      return arm_copy_undef (gdbarch, insn, dsc);
    }
}
7583
7584 /* Decode miscellaneous instructions in dp/misc encoding space. */
7585
7586 static int
7587 arm_decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
7588 struct regcache *regs,
7589 struct displaced_step_closure *dsc)
7590 {
7591 unsigned int op2 = bits (insn, 4, 6);
7592 unsigned int op = bits (insn, 21, 22);
7593 unsigned int op1 = bits (insn, 16, 19);
7594
7595 switch (op2)
7596 {
7597 case 0x0:
7598 return arm_copy_unmodified (gdbarch, insn, "mrs/msr", dsc);
7599
7600 case 0x1:
7601 if (op == 0x1) /* bx. */
7602 return arm_copy_bx_blx_reg (gdbarch, insn, regs, dsc);
7603 else if (op == 0x3)
7604 return arm_copy_unmodified (gdbarch, insn, "clz", dsc);
7605 else
7606 return arm_copy_undef (gdbarch, insn, dsc);
7607
7608 case 0x2:
7609 if (op == 0x1)
7610 /* Not really supported. */
7611 return arm_copy_unmodified (gdbarch, insn, "bxj", dsc);
7612 else
7613 return arm_copy_undef (gdbarch, insn, dsc);
7614
7615 case 0x3:
7616 if (op == 0x1)
7617 return arm_copy_bx_blx_reg (gdbarch, insn,
7618 regs, dsc); /* blx register. */
7619 else
7620 return arm_copy_undef (gdbarch, insn, dsc);
7621
7622 case 0x5:
7623 return arm_copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);
7624
7625 case 0x7:
7626 if (op == 0x1)
7627 return arm_copy_unmodified (gdbarch, insn, "bkpt", dsc);
7628 else if (op == 0x3)
7629 /* Not really supported. */
7630 return arm_copy_unmodified (gdbarch, insn, "smc", dsc);
7631
7632 default:
7633 return arm_copy_undef (gdbarch, insn, dsc);
7634 }
7635 }
7636
/* Dispatch the ARM data-processing / miscellaneous encoding space.  Bit 25
   selects immediate forms; otherwise OP1 (insn[24:20]) and OP2 (insn[7:4])
   select between register ALU ops, miscellaneous instructions, multiplies,
   synchronization primitives and extra load/stores, per the ARM ARM.  */

static int
arm_decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn,
		    struct regcache *regs,
		    struct displaced_step_closure *dsc)
{
  if (bit (insn, 25))
    switch (bits (insn, 20, 24))
      {
      case 0x10:
	return arm_copy_unmodified (gdbarch, insn, "movw", dsc);

      case 0x14:
	return arm_copy_unmodified (gdbarch, insn, "movt", dsc);

      case 0x12: case 0x16:
	return arm_copy_unmodified (gdbarch, insn, "msr imm", dsc);

      default:
	return arm_copy_alu_imm (gdbarch, insn, regs, dsc);
      }
  else
    {
      uint32_t op1 = bits (insn, 20, 24), op2 = bits (insn, 4, 7);

      if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0x0)
	return arm_copy_alu_reg (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1)
	return arm_copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0x0)
	return arm_decode_miscellaneous (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) == 0x10 && (op2 & 0x9) == 0x8)
	return arm_copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
      else if ((op1 & 0x10) == 0x00 && op2 == 0x9)
	return arm_copy_unmodified (gdbarch, insn, "mul/mla", dsc);
      else if ((op1 & 0x10) == 0x10 && op2 == 0x9)
	return arm_copy_unmodified (gdbarch, insn, "synch", dsc);
      else if (op2 == 0xb || (op2 & 0xd) == 0xd)
	/* 2nd arg means "unpriveleged".  */
	return arm_copy_extra_ld_st (gdbarch, insn, (op1 & 0x12) == 0x02, regs,
				     dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
7682
/* Dispatch ARM word / unsigned-byte load/store instructions.  A is bit 25
   (register vs immediate offset form), B is bit 4.  The three flags passed
   to arm_copy_ldr_str_ldrb_strb are: load (vs store), size in bytes, and
   user-mode ("t"-suffixed, unprivileged) variant, derived from OP1 per the
   ARM ARM decode table.  */

static int
arm_decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
			     struct regcache *regs,
			     struct displaced_step_closure *dsc)
{
  int a = bit (insn, 25), b = bit (insn, 4);
  uint32_t op1 = bits (insn, 20, 24);

  if ((!a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02)
      || (a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 4, 0);
  else if ((!a && (op1 & 0x17) == 0x02)
	   || (a && (op1 & 0x17) == 0x02 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 4, 1);
  else if ((!a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03)
	   || (a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 4, 0);
  else if ((!a && (op1 & 0x17) == 0x03)
	   || (a && (op1 & 0x17) == 0x03 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 4, 1);
  else if ((!a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06)
	   || (a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
  else if ((!a && (op1 & 0x17) == 0x06)
	   || (a && (op1 & 0x17) == 0x06 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
  else if ((!a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07)
	   || (a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
  else if ((!a && (op1 & 0x17) == 0x07)
	   || (a && (op1 & 0x17) == 0x07 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);

  /* Should be unreachable.  */
  return 1;
}
7720
/* Dispatch ARM media instructions (parallel add/sub, pack/unpack,
   saturation, bit-field operations) on insn[24:20]; none of these can
   reference the PC in a way that needs modification, so everything is
   copied unmodified or flagged undefined.  */

static int
arm_decode_media (struct gdbarch *gdbarch, uint32_t insn,
		  struct displaced_step_closure *dsc)
{
  switch (bits (insn, 20, 24))
    {
    case 0x00: case 0x01: case 0x02: case 0x03:
      return arm_copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);

    case 0x04: case 0x05: case 0x06: case 0x07:
      return arm_copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);

    case 0x08: case 0x09: case 0x0a: case 0x0b:
    case 0x0c: case 0x0d: case 0x0e: case 0x0f:
      return arm_copy_unmodified (gdbarch, insn,
				  "decode/pack/unpack/saturate/reverse", dsc);

    case 0x18:
      if (bits (insn, 5, 7) == 0)  /* op2.  */
	 {
	  /* Rd == 0xf distinguishes usad8 from usada8.  */
	  if (bits (insn, 12, 15) == 0xf)
	    return arm_copy_unmodified (gdbarch, insn, "usad8", dsc);
	  else
	    return arm_copy_unmodified (gdbarch, insn, "usada8", dsc);
	}
      else
	 return arm_copy_undef (gdbarch, insn, dsc);

    case 0x1a: case 0x1b:
      if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
	return arm_copy_unmodified (gdbarch, insn, "sbfx", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x1c: case 0x1d:
      if (bits (insn, 5, 6) == 0x0)  /* op2[1:0].  */
	 {
	  /* Rn == 0xf distinguishes bfc from bfi.  */
	  if (bits (insn, 0, 3) == 0xf)
	    return arm_copy_unmodified (gdbarch, insn, "bfc", dsc);
	  else
	    return arm_copy_unmodified (gdbarch, insn, "bfi", dsc);
	}
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x1e: case 0x1f:
      if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
	return arm_copy_unmodified (gdbarch, insn, "ubfx", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
7776
7777 static int
7778 arm_decode_b_bl_ldmstm (struct gdbarch *gdbarch, int32_t insn,
7779 struct regcache *regs,
7780 struct displaced_step_closure *dsc)
7781 {
7782 if (bit (insn, 25))
7783 return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);
7784 else
7785 return arm_copy_block_xfer (gdbarch, insn, regs, dsc);
7786 }
7787
/* Dispatch ARM extension-register (VFP/Neon) load/store instructions on
   insn[24:20].  Only vstr/vldr need the copro load/store copier; the
   multi-register forms cannot use the PC and run unmodified.  */

static int
arm_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
			  struct regcache *regs,
			  struct displaced_step_closure *dsc)
{
  unsigned int opcode = bits (insn, 20, 24);

  switch (opcode)
    {
    case 0x04: case 0x05:  /* VFP/Neon mrrc/mcrr.  */
      return arm_copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);

    case 0x08: case 0x0a: case 0x0c: case 0x0e:
    case 0x12: case 0x16:
      return arm_copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);

    case 0x09: case 0x0b: case 0x0d: case 0x0f:
    case 0x13: case 0x17:
      return arm_copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);

    case 0x10: case 0x14: case 0x18: case 0x1c:  /* vstr.  */
    case 0x11: case 0x15: case 0x19: case 0x1d:  /* vldr.  */
      /* Note: no writeback for these instructions.  Bit 25 will always be
	 zero though (via caller), so the following works OK.  */
      return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
7818
7819 /* Decode shifted register instructions. */
7820
7821 static int
7822 thumb2_decode_dp_shift_reg (struct gdbarch *gdbarch, uint16_t insn1,
7823 uint16_t insn2, struct regcache *regs,
7824 struct displaced_step_closure *dsc)
7825 {
7826 /* PC is only allowed to be used in instruction MOV. */
7827
7828 unsigned int op = bits (insn1, 5, 8);
7829 unsigned int rn = bits (insn1, 0, 3);
7830
7831 if (op == 0x2 && rn == 0xf) /* MOV */
7832 return thumb2_copy_alu_imm (gdbarch, insn1, insn2, regs, dsc);
7833 else
7834 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
7835 "dp (shift reg)", dsc);
7836 }
7837
7838
7839 /* Decode extension register load/store. Exactly the same as
7840 arm_decode_ext_reg_ld_st. */
7841
static int
thumb2_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint16_t insn1,
			     uint16_t insn2, struct regcache *regs,
			     struct displaced_step_closure *dsc)
{
  /* Opcode is insn1[8:4]; the comments give its binary pattern ('x' for
     don't-care bits), mirroring arm_decode_ext_reg_ld_st.  */
  unsigned int opcode = bits (insn1, 4, 8);

  switch (opcode)
    {
    case 0x04: case 0x05:
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					  "vfp/neon vmov", dsc);

    case 0x08: case 0x0c: /* 01x00 */
    case 0x0a: case 0x0e: /* 01x10 */
    case 0x12: case 0x16: /* 10x10 */
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					  "vfp/neon vstm/vpush", dsc);

    case 0x09: case 0x0d: /* 01x01 */
    case 0x0b: case 0x0f: /* 01x11 */
    case 0x13: case 0x17: /* 10x11 */
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					  "vfp/neon vldm/vpop", dsc);

    case 0x10: case 0x14: case 0x18: case 0x1c:  /* vstr.  */
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					  "vstr", dsc);
    case 0x11: case 0x15: case 0x19: case 0x1d:  /* vldr.  */
      /* Only vldr may be PC-relative, so it needs the copro copier.  */
      return thumb2_copy_copro_load_store (gdbarch, insn1, insn2, regs, dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
7877
7878 static int
7879 arm_decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
7880 struct regcache *regs, struct displaced_step_closure *dsc)
7881 {
7882 unsigned int op1 = bits (insn, 20, 25);
7883 int op = bit (insn, 4);
7884 unsigned int coproc = bits (insn, 8, 11);
7885 unsigned int rn = bits (insn, 16, 19);
7886
7887 if ((op1 & 0x20) == 0x00 && (op1 & 0x3a) != 0x00 && (coproc & 0xe) == 0xa)
7888 return arm_decode_ext_reg_ld_st (gdbarch, insn, regs, dsc);
7889 else if ((op1 & 0x21) == 0x00 && (op1 & 0x3a) != 0x00
7890 && (coproc & 0xe) != 0xa)
7891 /* stc/stc2. */
7892 return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
7893 else if ((op1 & 0x21) == 0x01 && (op1 & 0x3a) != 0x00
7894 && (coproc & 0xe) != 0xa)
7895 /* ldc/ldc2 imm/lit. */
7896 return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
7897 else if ((op1 & 0x3e) == 0x00)
7898 return arm_copy_undef (gdbarch, insn, dsc);
7899 else if ((op1 & 0x3e) == 0x04 && (coproc & 0xe) == 0xa)
7900 return arm_copy_unmodified (gdbarch, insn, "neon 64bit xfer", dsc);
7901 else if (op1 == 0x04 && (coproc & 0xe) != 0xa)
7902 return arm_copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
7903 else if (op1 == 0x05 && (coproc & 0xe) != 0xa)
7904 return arm_copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
7905 else if ((op1 & 0x30) == 0x20 && !op)
7906 {
7907 if ((coproc & 0xe) == 0xa)
7908 return arm_copy_unmodified (gdbarch, insn, "vfp dataproc", dsc);
7909 else
7910 return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
7911 }
7912 else if ((op1 & 0x30) == 0x20 && op)
7913 return arm_copy_unmodified (gdbarch, insn, "neon 8/16/32 bit xfer", dsc);
7914 else if ((op1 & 0x31) == 0x20 && op && (coproc & 0xe) != 0xa)
7915 return arm_copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
7916 else if ((op1 & 0x31) == 0x21 && op && (coproc & 0xe) != 0xa)
7917 return arm_copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
7918 else if ((op1 & 0x30) == 0x30)
7919 return arm_copy_svc (gdbarch, insn, regs, dsc);
7920 else
7921 return arm_copy_undef (gdbarch, insn, dsc); /* Possibly unreachable. */
7922 }
7923
7924 static int
7925 thumb2_decode_svc_copro (struct gdbarch *gdbarch, uint16_t insn1,
7926 uint16_t insn2, struct regcache *regs,
7927 struct displaced_step_closure *dsc)
7928 {
7929 unsigned int coproc = bits (insn2, 8, 11);
7930 unsigned int op1 = bits (insn1, 4, 9);
7931 unsigned int bit_5_8 = bits (insn1, 5, 8);
7932 unsigned int bit_9 = bit (insn1, 9);
7933 unsigned int bit_4 = bit (insn1, 4);
7934 unsigned int rn = bits (insn1, 0, 3);
7935
7936 if (bit_9 == 0)
7937 {
7938 if (bit_5_8 == 2)
7939 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
7940 "neon 64bit xfer/mrrc/mrrc2/mcrr/mcrr2",
7941 dsc);
7942 else if (bit_5_8 == 0) /* UNDEFINED. */
7943 return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
7944 else
7945 {
7946 /*coproc is 101x. SIMD/VFP, ext registers load/store. */
7947 if ((coproc & 0xe) == 0xa)
7948 return thumb2_decode_ext_reg_ld_st (gdbarch, insn1, insn2, regs,
7949 dsc);
7950 else /* coproc is not 101x. */
7951 {
7952 if (bit_4 == 0) /* STC/STC2. */
7953 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
7954 "stc/stc2", dsc);
7955 else /* LDC/LDC2 {literal, immeidate}. */
7956 return thumb2_copy_copro_load_store (gdbarch, insn1, insn2,
7957 regs, dsc);
7958 }
7959 }
7960 }
7961 else
7962 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "coproc", dsc);
7963
7964 return 0;
7965 }
7966
7967 static void
7968 install_pc_relative (struct gdbarch *gdbarch, struct regcache *regs,
7969 struct displaced_step_closure *dsc, int rd)
7970 {
7971 /* ADR Rd, #imm
7972
7973 Rewrite as:
7974
7975 Preparation: Rd <- PC
7976 Insn: ADD Rd, #imm
7977 Cleanup: Null.
7978 */
7979
7980 /* Rd <- PC */
7981 int val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
7982 displaced_write_reg (regs, dsc, rd, val, CANNOT_WRITE_PC);
7983 }
7984
7985 static int
7986 thumb_copy_pc_relative_16bit (struct gdbarch *gdbarch, struct regcache *regs,
7987 struct displaced_step_closure *dsc,
7988 int rd, unsigned int imm)
7989 {
7990
7991 /* Encoding T2: ADDS Rd, #imm */
7992 dsc->modinsn[0] = (0x3000 | (rd << 8) | imm);
7993
7994 install_pc_relative (gdbarch, regs, dsc, rd);
7995
7996 return 0;
7997 }
7998
7999 static int
8000 thumb_decode_pc_relative_16bit (struct gdbarch *gdbarch, uint16_t insn,
8001 struct regcache *regs,
8002 struct displaced_step_closure *dsc)
8003 {
8004 unsigned int rd = bits (insn, 8, 10);
8005 unsigned int imm8 = bits (insn, 0, 7);
8006
8007 if (debug_displaced)
8008 fprintf_unfiltered (gdb_stdlog,
8009 "displaced: copying thumb adr r%d, #%d insn %.4x\n",
8010 rd, imm8, insn);
8011
8012 return thumb_copy_pc_relative_16bit (gdbarch, regs, dsc, rd, imm8);
8013 }
8014
static int
thumb_copy_pc_relative_32bit (struct gdbarch *gdbarch, uint16_t insn1,
			      uint16_t insn2, struct regcache *regs,
			      struct displaced_step_closure *dsc)
{
  unsigned int rd = bits (insn2, 8, 11);
  /* Since immediate has the same encoding in ADR ADD and SUB, so we simply
     extract raw immediate encoding rather than computing immediate.  When
     generating ADD or SUB instruction, we can simply perform OR operation to
     set immediate into ADD.  */
  unsigned int imm_3_8 = insn2 & 0x70ff;
  unsigned int imm_i = insn1 & 0x0400;  /* Clear all bits except bit 10.  */

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: copying thumb adr r%d, #%d:%d insn %.4x%.4x\n",
			rd, imm_i, imm_3_8, insn1, insn2);

  /* Bit 7 of insn1 distinguishes the two ADR encodings: set means the
     SUB-form (ADR encoding T2), clear means the ADD-form (ADR encoding
     T3).  The "Encoding T3" comments below refer to the encodings of the
     SUB/ADD (immediate) instructions we emit, not of ADR itself.  */
  if (bit (insn1, 7)) /* Encoding T2 */
    {
      /* Encoding T3: SUB Rd, Rd, #imm */
      dsc->modinsn[0] = (0xf1a0 | rd | imm_i);
      dsc->modinsn[1] = ((rd << 8) | imm_3_8);
    }
  else /* Encoding T3 */
    {
      /* Encoding T3: ADD Rd, Rd, #imm */
      dsc->modinsn[0] = (0xf100 | rd | imm_i);
      dsc->modinsn[1] = ((rd << 8) | imm_3_8);
    }
  dsc->numinsns = 2;

  install_pc_relative (gdbarch, regs, dsc, rd);

  return 0;
}
8051
8052 static int
8053 thumb_copy_16bit_ldr_literal (struct gdbarch *gdbarch, unsigned short insn1,
8054 struct regcache *regs,
8055 struct displaced_step_closure *dsc)
8056 {
8057 unsigned int rt = bits (insn1, 8, 10);
8058 unsigned int pc;
8059 int imm8 = (bits (insn1, 0, 7) << 2);
8060 CORE_ADDR from = dsc->insn_addr;
8061
8062 /* LDR Rd, #imm8
8063
8064 Rwrite as:
8065
8066 Preparation: tmp0 <- R0, tmp2 <- R2, tmp3 <- R3, R2 <- PC, R3 <- #imm8;
8067
8068 Insn: LDR R0, [R2, R3];
8069 Cleanup: R2 <- tmp2, R3 <- tmp3, Rd <- R0, R0 <- tmp0 */
8070
8071 if (debug_displaced)
8072 fprintf_unfiltered (gdb_stdlog,
8073 "displaced: copying thumb ldr r%d [pc #%d]\n"
8074 , rt, imm8);
8075
8076 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
8077 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
8078 dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
8079 pc = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
8080 /* The assembler calculates the required value of the offset from the
8081 Align(PC,4) value of this instruction to the label. */
8082 pc = pc & 0xfffffffc;
8083
8084 displaced_write_reg (regs, dsc, 2, pc, CANNOT_WRITE_PC);
8085 displaced_write_reg (regs, dsc, 3, imm8, CANNOT_WRITE_PC);
8086
8087 dsc->rd = rt;
8088 dsc->u.ldst.xfersize = 4;
8089 dsc->u.ldst.rn = 0;
8090 dsc->u.ldst.immed = 0;
8091 dsc->u.ldst.writeback = 0;
8092 dsc->u.ldst.restore_r4 = 0;
8093
8094 dsc->modinsn[0] = 0x58d0; /* ldr r0, [r2, r3]*/
8095
8096 dsc->cleanup = &cleanup_load;
8097
8098 return 0;
8099 }
8100
8101 /* Copy Thumb cbnz/cbz insruction. */
8102
8103 static int
8104 thumb_copy_cbnz_cbz (struct gdbarch *gdbarch, uint16_t insn1,
8105 struct regcache *regs,
8106 struct displaced_step_closure *dsc)
8107 {
8108 int non_zero = bit (insn1, 11);
8109 unsigned int imm5 = (bit (insn1, 9) << 6) | (bits (insn1, 3, 7) << 1);
8110 CORE_ADDR from = dsc->insn_addr;
8111 int rn = bits (insn1, 0, 2);
8112 int rn_val = displaced_read_reg (regs, dsc, rn);
8113
8114 dsc->u.branch.cond = (rn_val && non_zero) || (!rn_val && !non_zero);
8115 /* CBNZ and CBZ do not affect the condition flags. If condition is true,
8116 set it INST_AL, so cleanup_branch will know branch is taken, otherwise,
8117 condition is false, let it be, cleanup_branch will do nothing. */
8118 if (dsc->u.branch.cond)
8119 {
8120 dsc->u.branch.cond = INST_AL;
8121 dsc->u.branch.dest = from + 4 + imm5;
8122 }
8123 else
8124 dsc->u.branch.dest = from + 2;
8125
8126 dsc->u.branch.link = 0;
8127 dsc->u.branch.exchange = 0;
8128
8129 if (debug_displaced)
8130 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s [r%d = 0x%x]"
8131 " insn %.4x to %.8lx\n", non_zero ? "cbnz" : "cbz",
8132 rn, rn_val, insn1, dsc->u.branch.dest);
8133
8134 dsc->modinsn[0] = THUMB_NOP;
8135
8136 dsc->cleanup = &cleanup_branch;
8137 return 0;
8138 }
8139
/* Copy Table Branch Byte/Halfword (TBB/TBH).

   TBB/TBH loads an unsigned byte/halfword offset from a table at
   RN + RM (scaled by 2 for TBH) and branches forward by twice that
   offset.  The table lookup is performed here against the original
   register values; the copied instruction itself is not executed to
   compute the branch, cleanup_branch installs the destination PC.  */
static int
thumb2_copy_table_branch (struct gdbarch *gdbarch, uint16_t insn1,
			  uint16_t insn2, struct regcache *regs,
			  struct displaced_step_closure *dsc)
{
  ULONGEST rn_val, rm_val;
  int is_tbh = bit (insn2, 4);
  CORE_ADDR halfwords = 0;
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  rn_val = displaced_read_reg (regs, dsc, bits (insn1, 0, 3));
  rm_val = displaced_read_reg (regs, dsc, bits (insn2, 0, 3));

  if (is_tbh)
    {
      gdb_byte buf[2];

      /* NOTE(review): the target_read_memory result is ignored here and
	 below; on a read failure HALFWORDS stays 0 and the branch falls
	 through — confirm this best-effort behavior is intended.  */
      target_read_memory (rn_val + 2 * rm_val, buf, 2);
      halfwords = extract_unsigned_integer (buf, 2, byte_order);
    }
  else
    {
      gdb_byte buf[1];

      target_read_memory (rn_val + rm_val, buf, 1);
      halfwords = extract_unsigned_integer (buf, 1, byte_order);
    }

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: %s base 0x%x offset 0x%x"
			" offset 0x%x\n", is_tbh ? "tbh" : "tbb",
			(unsigned int) rn_val, (unsigned int) rm_val,
			(unsigned int) halfwords);

  /* The branch is unconditional; destination is relative to the aligned
     PC (insn_addr + 4), advanced by the table entry in halfwords.  */
  dsc->u.branch.cond = INST_AL;
  dsc->u.branch.link = 0;
  dsc->u.branch.exchange = 0;
  dsc->u.branch.dest = dsc->insn_addr + 4 + 2 * halfwords;

  dsc->cleanup = &cleanup_branch;

  return 0;
}
8184
8185 static void
8186 cleanup_pop_pc_16bit_all (struct gdbarch *gdbarch, struct regcache *regs,
8187 struct displaced_step_closure *dsc)
8188 {
8189 /* PC <- r7 */
8190 int val = displaced_read_reg (regs, dsc, 7);
8191 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, val, BX_WRITE_PC);
8192
8193 /* r7 <- r8 */
8194 val = displaced_read_reg (regs, dsc, 8);
8195 displaced_write_reg (regs, dsc, 7, val, CANNOT_WRITE_PC);
8196
8197 /* r8 <- tmp[0] */
8198 displaced_write_reg (regs, dsc, 8, dsc->tmp[0], CANNOT_WRITE_PC);
8199
8200 }
8201
8202 static int
8203 thumb_copy_pop_pc_16bit (struct gdbarch *gdbarch, unsigned short insn1,
8204 struct regcache *regs,
8205 struct displaced_step_closure *dsc)
8206 {
8207 dsc->u.block.regmask = insn1 & 0x00ff;
8208
8209 /* Rewrite instruction: POP {rX, rY, ...,rZ, PC}
8210 to :
8211
8212 (1) register list is full, that is, r0-r7 are used.
8213 Prepare: tmp[0] <- r8
8214
8215 POP {r0, r1, ...., r6, r7}; remove PC from reglist
8216 MOV r8, r7; Move value of r7 to r8;
8217 POP {r7}; Store PC value into r7.
8218
8219 Cleanup: PC <- r7, r7 <- r8, r8 <-tmp[0]
8220
8221 (2) register list is not full, supposing there are N registers in
8222 register list (except PC, 0 <= N <= 7).
8223 Prepare: for each i, 0 - N, tmp[i] <- ri.
8224
8225 POP {r0, r1, ...., rN};
8226
8227 Cleanup: Set registers in original reglist from r0 - rN. Restore r0 - rN
8228 from tmp[] properly.
8229 */
8230 if (debug_displaced)
8231 fprintf_unfiltered (gdb_stdlog,
8232 "displaced: copying thumb pop {%.8x, pc} insn %.4x\n",
8233 dsc->u.block.regmask, insn1);
8234
8235 if (dsc->u.block.regmask == 0xff)
8236 {
8237 dsc->tmp[0] = displaced_read_reg (regs, dsc, 8);
8238
8239 dsc->modinsn[0] = (insn1 & 0xfeff); /* POP {r0,r1,...,r6, r7} */
8240 dsc->modinsn[1] = 0x46b8; /* MOV r8, r7 */
8241 dsc->modinsn[2] = 0xbc80; /* POP {r7} */
8242
8243 dsc->numinsns = 3;
8244 dsc->cleanup = &cleanup_pop_pc_16bit_all;
8245 }
8246 else
8247 {
8248 unsigned int num_in_list = bitcount (dsc->u.block.regmask);
8249 unsigned int new_regmask, bit = 1;
8250 unsigned int to = 0, from = 0, i, new_rn;
8251
8252 for (i = 0; i < num_in_list + 1; i++)
8253 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
8254
8255 new_regmask = (1 << (num_in_list + 1)) - 1;
8256
8257 if (debug_displaced)
8258 fprintf_unfiltered (gdb_stdlog, _("displaced: POP "
8259 "{..., pc}: original reg list %.4x,"
8260 " modified list %.4x\n"),
8261 (int) dsc->u.block.regmask, new_regmask);
8262
8263 dsc->u.block.regmask |= 0x8000;
8264 dsc->u.block.writeback = 0;
8265 dsc->u.block.cond = INST_AL;
8266
8267 dsc->modinsn[0] = (insn1 & ~0x1ff) | (new_regmask & 0xff);
8268
8269 dsc->cleanup = &cleanup_block_load_pc;
8270 }
8271
8272 return 0;
8273 }
8274
/* Decode a 16-bit Thumb instruction INSN1 and dispatch to the matching
   displaced-stepping copy routine, which fills in DSC.  Instructions
   that cannot reference the PC are passed through unmodified via
   thumb_copy_unmodified_16bit.  Raises an internal error if the
   instruction cannot be decoded.  */

static void
thumb_process_displaced_16bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
				    struct regcache *regs,
				    struct displaced_step_closure *dsc)
{
  /* Major opcode in bits 15:12, sub-opcode in bits 11:10.  */
  unsigned short op_bit_12_15 = bits (insn1, 12, 15);
  unsigned short op_bit_10_11 = bits (insn1, 10, 11);
  int err = 0;

  /* 16-bit thumb instructions. */
  switch (op_bit_12_15)
    {
      /* Shift (imme), add, subtract, move and compare. */
    case 0: case 1: case 2: case 3:
      err = thumb_copy_unmodified_16bit (gdbarch, insn1,
					 "shift/add/sub/mov/cmp",
					 dsc);
      break;
    case 4:
      switch (op_bit_10_11)
	{
	case 0: /* Data-processing */
	  err = thumb_copy_unmodified_16bit (gdbarch, insn1,
					     "data-processing",
					     dsc);
	  break;
	case 1: /* Special data instructions and branch and exchange.  */
	  {
	    unsigned short op = bits (insn1, 7, 9);
	    if (op == 6 || op == 7) /* BX or BLX */
	      err = thumb_copy_bx_blx_reg (gdbarch, insn1, regs, dsc);
	    else if (bits (insn1, 6, 7) != 0) /* ADD/MOV/CMP high registers.  */
	      err = thumb_copy_alu_reg (gdbarch, insn1, regs, dsc);
	    else
	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "special data",
						 dsc);
	  }
	  break;
	default: /* LDR (literal) */
	  err = thumb_copy_16bit_ldr_literal (gdbarch, insn1, regs, dsc);
	}
      break;
    case 5: case 6: case 7: case 8: case 9: /* Load/Store single data item */
      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "ldr/str", dsc);
      break;
    case 10:
      if (op_bit_10_11 < 2) /* Generate PC-relative address */
	err = thumb_decode_pc_relative_16bit (gdbarch, insn1, regs, dsc);
      else /* Generate SP-relative address */
	err = thumb_copy_unmodified_16bit (gdbarch, insn1, "sp-relative", dsc);
      break;
    case 11: /* Misc 16-bit instructions */
      {
	switch (bits (insn1, 8, 11))
	  {
	  case 1: case 3:  case 9: case 11: /* CBNZ, CBZ */
	    err = thumb_copy_cbnz_cbz (gdbarch, insn1, regs, dsc);
	    break;
	  case 12: case 13: /* POP */
	    if (bit (insn1, 8)) /* PC is in register list.  */
	      err = thumb_copy_pop_pc_16bit (gdbarch, insn1, regs, dsc);
	    else
	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "pop", dsc);
	    break;
	  case 15: /* If-Then, and hints */
	    if (bits (insn1, 0, 3))
	      /* If-Then makes up to four following instructions conditional.
		 IT instruction itself is not conditional, so handle it as a
		 common unmodified instruction. */
	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "If-Then",
						 dsc);
	    else
	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "hints", dsc);
	    break;
	  default:
	    err = thumb_copy_unmodified_16bit (gdbarch, insn1, "misc", dsc);
	  }
      }
      break;
    case 12:
      if (op_bit_10_11 < 2) /* Store multiple registers */
	err = thumb_copy_unmodified_16bit (gdbarch, insn1, "stm", dsc);
      else /* Load multiple registers */
	err = thumb_copy_unmodified_16bit (gdbarch, insn1, "ldm", dsc);
      break;
    case 13: /* Conditional branch and supervisor call */
      if (bits (insn1, 9, 11) != 7) /* conditional branch */
	err = thumb_copy_b (gdbarch, insn1, dsc);
      else
	err = thumb_copy_svc (gdbarch, insn1, regs, dsc);
      break;
    case 14: /* Unconditional branch */
      err = thumb_copy_b (gdbarch, insn1, dsc);
      break;
    default:
      err = 1;
    }

  if (err)
    internal_error (__FILE__, __LINE__,
		    _("thumb_process_displaced_16bit_insn: Instruction decode error"));
}
8377
/* Decode a Thumb-2 32-bit load / memory-hint instruction (LDRB/LDRSB,
   LDRH/LDRSH, LDR, PLD/PLI and friends) given its two halfwords
   INSN1/INSN2 and dispatch to the matching displaced-stepping copy
   routine.  Returns the copy routine's result (0 on success); undefined
   encodings go to thumb_32bit_copy_undef.  The previously declared
   local ERR was never used (every path returns directly) and has been
   removed.  */

static int
decode_thumb_32bit_ld_mem_hints (struct gdbarch *gdbarch,
				 uint16_t insn1, uint16_t insn2,
				 struct regcache *regs,
				 struct displaced_step_closure *dsc)
{
  int rt = bits (insn2, 12, 15);
  int rn = bits (insn1, 0, 3);
  int op1 = bits (insn1, 7, 8);

  switch (bits (insn1, 5, 6))
    {
    case 0: /* Load byte and memory hints */
      if (rt == 0xf) /* PLD/PLI */
	{
	  if (rn == 0xf)
	    /* PLD literal or Encoding T3 of PLI(immediate, literal).  */
	    return thumb2_copy_preload (gdbarch, insn1, insn2, regs, dsc);
	  else
	    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						"pli/pld", dsc);
	}
      else
	{
	  if (rn == 0xf) /* LDRB/LDRSB (literal) */
	    return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc,
					     1);
	  else
	    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						"ldrb{reg, immediate}/ldrbt",
						dsc);
	}

      break;
    case 1: /* Load halfword and memory hints.  */
      if (rt == 0xf) /* PLD{W} and Unalloc memory hint.  */
	return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					    "pld/unalloc memhint", dsc);
      else
	{
	  if (rn == 0xf)
	    return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc,
					     2);
	  else
	    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						"ldrh/ldrht", dsc);
	}
      break;
    case 2: /* Load word */
      {
	int insn2_bit_8_11 = bits (insn2, 8, 11);

	if (rn == 0xf)
	  return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc, 4);
	else if (op1 == 0x1) /* Encoding T3 */
	  return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs, dsc,
					   0, 1);
	else /* op1 == 0x0 */
	  {
	    if (insn2_bit_8_11 == 0xc || (insn2_bit_8_11 & 0x9) == 0x9)
	      /* LDR (immediate) */
	      return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs,
					       dsc, bit (insn2, 8), 1);
	    else if (insn2_bit_8_11 == 0xe) /* LDRT */
	      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						  "ldrt", dsc);
	    else
	      /* LDR (register) */
	      return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs,
					       dsc, 0, 0);
	  }
	break;
      }
    default:
      return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
      break;
    }
  return 0;
}
8458
/* Decode a 32-bit Thumb-2 instruction (halfwords INSN1/INSN2) and
   dispatch to the matching displaced-stepping copy routine, which
   fills in DSC.  Instructions that cannot reference the PC are passed
   through unmodified.  Raises an internal error if the instruction
   cannot be decoded.  */

static void
thumb_process_displaced_32bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
				    uint16_t insn2, struct regcache *regs,
				    struct displaced_step_closure *dsc)
{
  int err = 0;
  /* NOTE(review): this OP (bit 15 of the second halfword) is shadowed
     by a different OP local inside the "case 2" branch below — a rename
     would avoid -Wshadow noise.  */
  unsigned short op = bit (insn2, 15);
  unsigned int op1 = bits (insn1, 11, 12);

  switch (op1)
    {
    case 1:
      {
	switch (bits (insn1, 9, 10))
	  {
	  case 0:
	    if (bit (insn1, 6))
	      {
		/* Load/store {dual, exclusive}, table branch.  */
		if (bits (insn1, 7, 8) == 1 && bits (insn1, 4, 5) == 1
		    && bits (insn2, 5, 7) == 0)
		  err = thumb2_copy_table_branch (gdbarch, insn1, insn2, regs,
						  dsc);
		else
		  /* PC is not allowed to use in load/store {dual, exclusive}
		     instructions.  */
		  err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						     "load/store dual/ex", dsc);
	      }
	    else /* load/store multiple */
	      {
		switch (bits (insn1, 7, 8))
		  {
		  case 0: case 3: /* SRS, RFE */
		    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						       "srs/rfe", dsc);
		    break;
		  case 1: case 2: /* LDM/STM/PUSH/POP */
		    err = thumb2_copy_block_xfer (gdbarch, insn1, insn2, regs, dsc);
		    break;
		  }
	      }
	    break;

	  case 1:
	    /* Data-processing (shift register).  */
	    err = thumb2_decode_dp_shift_reg (gdbarch, insn1, insn2, regs,
					      dsc);
	    break;
	  default: /* Coprocessor instructions.  */
	    err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
	    break;
	  }
	break;
      }
    case 2: /* op1 = 2 */
      if (op) /* Branch and misc control.  */
	{
	  if (bit (insn2, 14)  /* BLX/BL */
	      || bit (insn2, 12) /* Unconditional branch */
	      || (bits (insn1, 7, 9) != 0x7)) /* Conditional branch */
	    err = thumb2_copy_b_bl_blx (gdbarch, insn1, insn2, regs, dsc);
	  else
	    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					       "misc ctrl", dsc);
	}
      else
	{
	  if (bit (insn1, 9)) /* Data processing (plain binary imm).  */
	    {
	      /* This OP shadows the function-scope OP above.  */
	      int op = bits (insn1, 4, 8);
	      int rn = bits (insn1, 0, 3);
	      if ((op == 0 || op == 0xa) && rn == 0xf)
		err = thumb_copy_pc_relative_32bit (gdbarch, insn1, insn2,
						    regs, dsc);
	      else
		err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						   "dp/pb", dsc);
	    }
	  else /* Data processing (modified immediate) */
	    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					       "dp/mi", dsc);
	}
      break;
    case 3: /* op1 = 3 */
      switch (bits (insn1, 9, 10))
	{
	case 0:
	  if (bit (insn1, 4))
	    err = decode_thumb_32bit_ld_mem_hints (gdbarch, insn1, insn2,
						   regs, dsc);
	  else /* NEON Load/Store and Store single data item */
	    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					       "neon elt/struct load/store",
					       dsc);
	  break;
	case 1: /* op1 = 3, bits (9, 10) == 1 */
	  switch (bits (insn1, 7, 8))
	    {
	    case 0: case 1: /* Data processing (register) */
	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						 "dp(reg)", dsc);
	      break;
	    case 2: /* Multiply and absolute difference */
	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						 "mul/mua/diff", dsc);
	      break;
	    case 3: /* Long multiply and divide */
	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						 "lmul/lmua", dsc);
	      break;
	    }
	  break;
	default: /* Coprocessor instructions */
	  err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
	  break;
	}
      break;
    default:
      err = 1;
    }

  if (err)
    internal_error (__FILE__, __LINE__,
		    _("thumb_process_displaced_32bit_insn: Instruction decode error"));

}
8586
8587 static void
8588 thumb_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
8589 CORE_ADDR to, struct regcache *regs,
8590 struct displaced_step_closure *dsc)
8591 {
8592 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
8593 uint16_t insn1
8594 = read_memory_unsigned_integer (from, 2, byte_order_for_code);
8595
8596 if (debug_displaced)
8597 fprintf_unfiltered (gdb_stdlog, "displaced: process thumb insn %.4x "
8598 "at %.8lx\n", insn1, (unsigned long) from);
8599
8600 dsc->is_thumb = 1;
8601 dsc->insn_size = thumb_insn_size (insn1);
8602 if (thumb_insn_size (insn1) == 4)
8603 {
8604 uint16_t insn2
8605 = read_memory_unsigned_integer (from + 2, 2, byte_order_for_code);
8606 thumb_process_displaced_32bit_insn (gdbarch, insn1, insn2, regs, dsc);
8607 }
8608 else
8609 thumb_process_displaced_16bit_insn (gdbarch, insn1, regs, dsc);
8610 }
8611
8612 void
8613 arm_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
8614 CORE_ADDR to, struct regcache *regs,
8615 struct displaced_step_closure *dsc)
8616 {
8617 int err = 0;
8618 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
8619 uint32_t insn;
8620
8621 /* Most displaced instructions use a 1-instruction scratch space, so set this
8622 here and override below if/when necessary. */
8623 dsc->numinsns = 1;
8624 dsc->insn_addr = from;
8625 dsc->scratch_base = to;
8626 dsc->cleanup = NULL;
8627 dsc->wrote_to_pc = 0;
8628
8629 if (!displaced_in_arm_mode (regs))
8630 return thumb_process_displaced_insn (gdbarch, from, to, regs, dsc);
8631
8632 dsc->is_thumb = 0;
8633 dsc->insn_size = 4;
8634 insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
8635 if (debug_displaced)
8636 fprintf_unfiltered (gdb_stdlog, "displaced: stepping insn %.8lx "
8637 "at %.8lx\n", (unsigned long) insn,
8638 (unsigned long) from);
8639
8640 if ((insn & 0xf0000000) == 0xf0000000)
8641 err = arm_decode_unconditional (gdbarch, insn, regs, dsc);
8642 else switch (((insn & 0x10) >> 4) | ((insn & 0xe000000) >> 24))
8643 {
8644 case 0x0: case 0x1: case 0x2: case 0x3:
8645 err = arm_decode_dp_misc (gdbarch, insn, regs, dsc);
8646 break;
8647
8648 case 0x4: case 0x5: case 0x6:
8649 err = arm_decode_ld_st_word_ubyte (gdbarch, insn, regs, dsc);
8650 break;
8651
8652 case 0x7:
8653 err = arm_decode_media (gdbarch, insn, dsc);
8654 break;
8655
8656 case 0x8: case 0x9: case 0xa: case 0xb:
8657 err = arm_decode_b_bl_ldmstm (gdbarch, insn, regs, dsc);
8658 break;
8659
8660 case 0xc: case 0xd: case 0xe: case 0xf:
8661 err = arm_decode_svc_copro (gdbarch, insn, to, regs, dsc);
8662 break;
8663 }
8664
8665 if (err)
8666 internal_error (__FILE__, __LINE__,
8667 _("arm_process_displaced_insn: Instruction decode error"));
8668 }
8669
/* Actually set up the scratch space for a displaced instruction.  Write
   the modified instruction(s) from DSC to the scratch area TO, followed
   by the architecture's breakpoint instruction so the step traps when
   the copied sequence completes.  FROM is used only for the debug
   message.  */

void
arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,
			    CORE_ADDR to, struct displaced_step_closure *dsc)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  unsigned int i, len, offset;
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  /* Thumb modinsn entries are 2-byte halfwords, ARM entries 4 bytes.  */
  int size = dsc->is_thumb? 2 : 4;
  const gdb_byte *bkp_insn;

  offset = 0;
  /* Poke modified instruction(s).  */
  for (i = 0; i < dsc->numinsns; i++)
    {
      if (debug_displaced)
	{
	  fprintf_unfiltered (gdb_stdlog, "displaced: writing insn ");
	  if (size == 4)
	    fprintf_unfiltered (gdb_stdlog, "%.8lx",
				dsc->modinsn[i]);
	  else if (size == 2)
	    fprintf_unfiltered (gdb_stdlog, "%.4x",
				(unsigned short)dsc->modinsn[i]);

	  fprintf_unfiltered (gdb_stdlog, " at %.8lx\n",
			      (unsigned long) to + offset);

	}
      write_memory_unsigned_integer (to + offset, size,
				     byte_order_for_code,
				     dsc->modinsn[i]);
      offset += size;
    }

  /* Choose the correct breakpoint instruction.  */
  if (dsc->is_thumb)
    {
      bkp_insn = tdep->thumb_breakpoint;
      len = tdep->thumb_breakpoint_size;
    }
  else
    {
      bkp_insn = tdep->arm_breakpoint;
      len = tdep->arm_breakpoint_size;
    }

  /* Put breakpoint afterwards.  */
  write_memory (to + offset, bkp_insn, len);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
			paddress (gdbarch, from), paddress (gdbarch, to));
}
8725
8726 /* Entry point for copying an instruction into scratch space for displaced
8727 stepping. */
8728
8729 struct displaced_step_closure *
8730 arm_displaced_step_copy_insn (struct gdbarch *gdbarch,
8731 CORE_ADDR from, CORE_ADDR to,
8732 struct regcache *regs)
8733 {
8734 struct displaced_step_closure *dsc
8735 = xmalloc (sizeof (struct displaced_step_closure));
8736 arm_process_displaced_insn (gdbarch, from, to, regs, dsc);
8737 arm_displaced_init_closure (gdbarch, from, to, dsc);
8738
8739 return dsc;
8740 }
8741
8742 /* Entry point for cleaning things up after a displaced instruction has been
8743 single-stepped. */
8744
8745 void
8746 arm_displaced_step_fixup (struct gdbarch *gdbarch,
8747 struct displaced_step_closure *dsc,
8748 CORE_ADDR from, CORE_ADDR to,
8749 struct regcache *regs)
8750 {
8751 if (dsc->cleanup)
8752 dsc->cleanup (gdbarch, regs, dsc);
8753
8754 if (!dsc->wrote_to_pc)
8755 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
8756 dsc->insn_addr + dsc->insn_size);
8757
8758 }
8759
8760 #include "bfd-in2.h"
8761 #include "libcoff.h"
8762
/* Disassembler callback: print the instruction at MEMADDR, selecting
   ARM or Thumb decoding based on the symbol-table heuristics for the
   address.  Returns the length consumed, as reported by the opcodes
   library.  */

static int
gdb_print_insn_arm (bfd_vma memaddr, disassemble_info *info)
{
  struct gdbarch *gdbarch = info->application_data;

  if (arm_pc_is_thumb (gdbarch, memaddr))
    {
      /* Static: the fake symbol is built once and reused on every
	 subsequent Thumb disassembly.  */
      static asymbol *asym;
      static combined_entry_type ce;
      static struct coff_symbol_struct csym;
      static struct bfd fake_bfd;
      static bfd_target fake_target;

      if (csym.native == NULL)
	{
	  /* Create a fake symbol vector containing a Thumb symbol.
	     This is solely so that the code in print_insn_little_arm()
	     and print_insn_big_arm() in opcodes/arm-dis.c will detect
	     the presence of a Thumb symbol and switch to decoding
	     Thumb instructions.  */

	  fake_target.flavour = bfd_target_coff_flavour;
	  fake_bfd.xvec = &fake_target;
	  ce.u.syment.n_sclass = C_THUMBEXTFUNC;
	  csym.native = &ce;
	  csym.symbol.the_bfd = &fake_bfd;
	  csym.symbol.name = "fake";
	  asym = (asymbol *) & csym;
	}

      /* Strip the Thumb bit before handing the address to opcodes.  */
      memaddr = UNMAKE_THUMB_ADDR (memaddr);
      info->symbols = &asym;
    }
  else
    info->symbols = NULL;

  if (info->endian == BFD_ENDIAN_BIG)
    return print_insn_big_arm (memaddr, info);
  else
    return print_insn_little_arm (memaddr, info);
}
8804
8805 /* The following define instruction sequences that will cause ARM
8806 cpu's to take an undefined instruction trap. These are used to
8807 signal a breakpoint to GDB.
8808
8809 The newer ARMv4T cpu's are capable of operating in ARM or Thumb
8810 modes. A different instruction is required for each mode. The ARM
8811 cpu's can also be big or little endian. Thus four different
8812 instructions are needed to support all cases.
8813
8814 Note: ARMv4 defines several new instructions that will take the
8815 undefined instruction trap. ARM7TDMI is nominally ARMv4T, but does
8816 not in fact add the new instructions. The new undefined
8817 instructions in ARMv4 are all instructions that had no defined
8818 behaviour in earlier chips. There is no guarantee that they will
8819 raise an exception, but may be treated as NOP's. In practice, it
may only be safe to rely on instructions matching:
8821
8822 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
8823 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
8824 C C C C 0 1 1 x x x x x x x x x x x x x x x x x x x x 1 x x x x
8825
Even this may only be true if the condition predicate is true.  The
8827 following use a condition predicate of ALWAYS so it is always TRUE.
8828
8829 There are other ways of forcing a breakpoint. GNU/Linux, RISC iX,
8830 and NetBSD all use a software interrupt rather than an undefined
instruction to force a trap.  This can be handled by the
8832 abi-specific code during establishment of the gdbarch vector. */
8833
8834 #define ARM_LE_BREAKPOINT {0xFE,0xDE,0xFF,0xE7}
8835 #define ARM_BE_BREAKPOINT {0xE7,0xFF,0xDE,0xFE}
8836 #define THUMB_LE_BREAKPOINT {0xbe,0xbe}
8837 #define THUMB_BE_BREAKPOINT {0xbe,0xbe}
8838
8839 static const gdb_byte arm_default_arm_le_breakpoint[] = ARM_LE_BREAKPOINT;
8840 static const gdb_byte arm_default_arm_be_breakpoint[] = ARM_BE_BREAKPOINT;
8841 static const gdb_byte arm_default_thumb_le_breakpoint[] = THUMB_LE_BREAKPOINT;
8842 static const gdb_byte arm_default_thumb_be_breakpoint[] = THUMB_BE_BREAKPOINT;
8843
/* Determine the type and size of breakpoint to insert at PCPTR.  Uses
   the program counter value to determine whether a 16-bit or 32-bit
   breakpoint should be used.  It returns a pointer to a string of
   bytes that encode a breakpoint instruction, stores the length of
   the string to *lenptr, and adjusts the program counter (if
   necessary) to point to the actual memory location where the
   breakpoint should be inserted.  */

static const unsigned char *
arm_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr, int *lenptr)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);

  if (arm_pc_is_thumb (gdbarch, *pcptr))
    {
      /* Strip the Thumb bit so the breakpoint lands on the real
	 instruction address.  */
      *pcptr = UNMAKE_THUMB_ADDR (*pcptr);

      /* If we have a separate 32-bit breakpoint instruction for Thumb-2,
	 check whether we are replacing a 32-bit instruction.  */
      if (tdep->thumb2_breakpoint != NULL)
	{
	  gdb_byte buf[2];
	  if (target_read_memory (*pcptr, buf, 2) == 0)
	    {
	      unsigned short inst1;
	      inst1 = extract_unsigned_integer (buf, 2, byte_order_for_code);
	      if (thumb_insn_size (inst1) == 4)
		{
		  *lenptr = tdep->thumb2_breakpoint_size;
		  return tdep->thumb2_breakpoint;
		}
	    }
	}

      /* Fall back to the 16-bit Thumb breakpoint (also used when the
	 memory read above fails).  */
      *lenptr = tdep->thumb_breakpoint_size;
      return tdep->thumb_breakpoint;
    }
  else
    {
      *lenptr = tdep->arm_breakpoint_size;
      return tdep->arm_breakpoint;
    }
}
8888
8889 static void
8890 arm_remote_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
8891 int *kindptr)
8892 {
8893 arm_breakpoint_from_pc (gdbarch, pcptr, kindptr);
8894
8895 if (arm_pc_is_thumb (gdbarch, *pcptr) && *kindptr == 4)
8896 /* The documented magic value for a 32-bit Thumb-2 breakpoint, so
8897 that this is not confused with a 32-bit ARM breakpoint. */
8898 *kindptr = 3;
8899 }
8900
/* Extract from an array REGBUF containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.  Floating-point values are handled per the
   configured FP model; integer-like and aggregate values come from
   r0 onwards.  */

static void
arm_extract_return_value (struct type *type, struct regcache *regs,
			  gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE_FLT == TYPE_CODE (type))
    {
      switch (gdbarch_tdep (gdbarch)->fp_model)
	{
	case ARM_FLOAT_FPA:
	  {
	    /* The value is in register F0 in internal format.  We need to
	       extract the raw value and then convert it to the desired
	       internal type.  */
	    bfd_byte tmpbuf[FP_REGISTER_SIZE];

	    regcache_cooked_read (regs, ARM_F0_REGNUM, tmpbuf);
	    convert_from_extended (floatformat_from_type (type), tmpbuf,
				   valbuf, gdbarch_byte_order (gdbarch));
	  }
	  break;

	case ARM_FLOAT_SOFT_FPA:
	case ARM_FLOAT_SOFT_VFP:
	  /* ARM_FLOAT_VFP can arise if this is a variadic function so
	     not using the VFP ABI code.  */
	case ARM_FLOAT_VFP:
	  /* Soft-float: the value is in r0 (and r1 for doubles).  */
	  regcache_cooked_read (regs, ARM_A1_REGNUM, valbuf);
	  if (TYPE_LENGTH (type) > 4)
	    regcache_cooked_read (regs, ARM_A1_REGNUM + 1,
				  valbuf + INT_REGISTER_SIZE);
	  break;

	default:
	  internal_error (__FILE__, __LINE__,
			  _("arm_extract_return_value: "
			    "Floating point model not supported"));
	  break;
	}
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_CODE (type) == TYPE_CODE_REF
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
	 straight-forward.  Otherwise we have to play around a bit
	 more.  */
      int len = TYPE_LENGTH (type);
      int regno = ARM_A1_REGNUM;
      ULONGEST tmp;

      while (len > 0)
	{
	  /* By using store_unsigned_integer we avoid having to do
	     anything special for small big-endian values.  */
	  regcache_cooked_read_unsigned (regs, regno++, &tmp);
	  store_unsigned_integer (valbuf,
				  (len > INT_REGISTER_SIZE
				   ? INT_REGISTER_SIZE : len),
				  byte_order, tmp);
	  len -= INT_REGISTER_SIZE;
	  valbuf += INT_REGISTER_SIZE;
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 32-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = ARM_A1_REGNUM;
      bfd_byte tmpbuf[INT_REGISTER_SIZE];

      while (len > 0)
	{
	  regcache_cooked_read (regs, regno++, tmpbuf);
	  memcpy (valbuf, tmpbuf,
		  len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
	  len -= INT_REGISTER_SIZE;
	  valbuf += INT_REGISTER_SIZE;
	}
    }
}
8993
8994
/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  */

static int
arm_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  int nRc;
  enum type_code code;

  CHECK_TYPEDEF (type);

  /* In the ARM ABI, "integer" like aggregate types are returned in
     registers.  For an aggregate type to be integer like, its size
     must be less than or equal to INT_REGISTER_SIZE and the
     offset of each addressable subfield must be zero.  Note that bit
     fields are not addressable, and all addressable subfields of
     unions always start at offset zero.

     This function is based on the behaviour of GCC 2.95.1.
     See: gcc/arm.c: arm_return_in_memory() for details.

     Note: All versions of GCC before GCC 2.95.2 do not set up the
     parameters correctly for a function returning the following
     structure: struct { float f;}; This should be returned in memory,
     not a register.  Richard Earnshaw sent me a patch, but I do not
     know of any way to detect if a function like the above has been
     compiled with the correct calling convention.  */

  /* All aggregate types that won't fit in a register must be returned
     in memory.  */
  if (TYPE_LENGTH (type) > INT_REGISTER_SIZE)
    {
      return 1;
    }

  /* The AAPCS says all aggregates not larger than a word are returned
     in a register.  */
  if (gdbarch_tdep (gdbarch)->arm_abi != ARM_ABI_APCS)
    return 0;

  /* From here on only the legacy APCS rules apply.  */

  /* The only aggregate types that can be returned in a register are
     structs and unions.  Arrays must be returned in memory.  */
  code = TYPE_CODE (type);
  if ((TYPE_CODE_STRUCT != code) && (TYPE_CODE_UNION != code))
    {
      return 1;
    }

  /* Assume all other aggregate types can be returned in a register.
     Run a check for structures, unions and arrays.  */
  nRc = 0;

  if ((TYPE_CODE_STRUCT == code) || (TYPE_CODE_UNION == code))
    {
      int i;
      /* Need to check if this struct/union is "integer" like.  For
	 this to be true, its size must be less than or equal to
	 INT_REGISTER_SIZE and the offset of each addressable
	 subfield must be zero.  Note that bit fields are not
	 addressable, and unions always start at offset zero.  If any
	 of the subfields is a floating point type, the struct/union
	 cannot be an integer type.  */

      /* For each field in the object, check:
	 1) Is it FP? --> yes, nRc = 1;
	 2) Is it addressable (bitpos != 0) and
	 not packed (bitsize == 0)?
	 --> yes, nRc = 1
      */

      for (i = 0; i < TYPE_NFIELDS (type); i++)
	{
	  enum type_code field_type_code;
	  field_type_code = TYPE_CODE (check_typedef (TYPE_FIELD_TYPE (type,
								       i)));

	  /* Is it a floating point type field?  */
	  if (field_type_code == TYPE_CODE_FLT)
	    {
	      nRc = 1;
	      break;
	    }

	  /* If bitpos != 0, then we have to care about it.  */
	  if (TYPE_FIELD_BITPOS (type, i) != 0)
	    {
	      /* Bitfields are not addressable.  If the field bitsize is
		 zero, then the field is not packed.  Hence it cannot be
		 a bitfield or any other packed type.  */
	      if (TYPE_FIELD_BITSIZE (type, i) == 0)
		{
		  nRc = 1;
		  break;
		}
	    }
	}
    }

  return nRc;
}
9096
/* Write into appropriate registers a function return value of type
   TYPE, given in virtual format.  The inverse of
   arm_extract_return_value: floats follow the configured FP model,
   integer-like and aggregate values go into r0 onwards.  */

static void
arm_store_return_value (struct type *type, struct regcache *regs,
			const gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      gdb_byte buf[MAX_REGISTER_SIZE];

      switch (gdbarch_tdep (gdbarch)->fp_model)
	{
	case ARM_FLOAT_FPA:

	  /* Convert to FPA extended format and place in F0.  */
	  convert_to_extended (floatformat_from_type (type), buf, valbuf,
			       gdbarch_byte_order (gdbarch));
	  regcache_cooked_write (regs, ARM_F0_REGNUM, buf);
	  break;

	case ARM_FLOAT_SOFT_FPA:
	case ARM_FLOAT_SOFT_VFP:
	  /* ARM_FLOAT_VFP can arise if this is a variadic function so
	     not using the VFP ABI code.  */
	case ARM_FLOAT_VFP:
	  /* Soft-float: the value goes in r0 (and r1 for doubles).  */
	  regcache_cooked_write (regs, ARM_A1_REGNUM, valbuf);
	  if (TYPE_LENGTH (type) > 4)
	    regcache_cooked_write (regs, ARM_A1_REGNUM + 1,
				   valbuf + INT_REGISTER_SIZE);
	  break;

	default:
	  internal_error (__FILE__, __LINE__,
			  _("arm_store_return_value: Floating "
			    "point model not supported"));
	  break;
	}
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_CODE (type) == TYPE_CODE_REF
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      if (TYPE_LENGTH (type) <= 4)
	{
	  /* Values of one word or less are zero/sign-extended and
	     returned in r0.  */
	  bfd_byte tmpbuf[INT_REGISTER_SIZE];
	  LONGEST val = unpack_long (type, valbuf);

	  store_signed_integer (tmpbuf, INT_REGISTER_SIZE, byte_order, val);
	  regcache_cooked_write (regs, ARM_A1_REGNUM, tmpbuf);
	}
      else
	{
	  /* Integral values greater than one word are stored in consecutive
	     registers starting with r0.  This will always be a multiple of
	     the register size.  */
	  int len = TYPE_LENGTH (type);
	  int regno = ARM_A1_REGNUM;

	  while (len > 0)
	    {
	      regcache_cooked_write (regs, regno++, valbuf);
	      len -= INT_REGISTER_SIZE;
	      valbuf += INT_REGISTER_SIZE;
	    }
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 32-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = ARM_A1_REGNUM;
      bfd_byte tmpbuf[INT_REGISTER_SIZE];

      while (len > 0)
	{
	  memcpy (tmpbuf, valbuf,
		  len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
	  regcache_cooked_write (regs, regno++, tmpbuf);
	  len -= INT_REGISTER_SIZE;
	  valbuf += INT_REGISTER_SIZE;
	}
    }
}
9190
9191
/* Handle function return values.  Decide whether VALTYPE is returned
   in registers or in memory, and optionally read (READBUF) or write
   (WRITEBUF) the value.  Checks, in order: the VFP register-passing
   ABI, struct-return-in-memory rules, and the AAPCS complex-type
   rule; anything left falls through to the core-register path.  */

static enum return_value_convention
arm_return_value (struct gdbarch *gdbarch, struct value *function,
                  struct type *valtype, struct regcache *regcache,
                  gdb_byte *readbuf, const gdb_byte *writebuf)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  struct type *func_type = function ? value_type (function) : NULL;
  enum arm_vfp_cprc_base_type vfp_base_type;
  int vfp_base_count;

  /* VFP co-processor register candidates (float, double, and
     homogeneous aggregates thereof) go in s/d/q registers when the
     function uses the VFP calling convention.  */
  if (arm_vfp_abi_for_function (gdbarch, func_type)
      && arm_vfp_call_candidate (valtype, &vfp_base_type, &vfp_base_count))
    {
      int reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
      int unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
      int i;
      for (i = 0; i < vfp_base_count; i++)
        {
          if (reg_char == 'q')
            {
              /* Quad registers have no raw register; access them via
                 their two double halves.  */
              if (writebuf)
                arm_neon_quad_write (gdbarch, regcache, i,
                                     writebuf + i * unit_length);

              if (readbuf)
                arm_neon_quad_read (gdbarch, regcache, i,
                                    readbuf + i * unit_length);
            }
          else
            {
              char name_buf[4];
              int regnum;

              /* Look up s<i> or d<i> by name.  */
              xsnprintf (name_buf, sizeof (name_buf), "%c%d", reg_char, i);
              regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
                                                    strlen (name_buf));
              if (writebuf)
                regcache_cooked_write (regcache, regnum,
                                       writebuf + i * unit_length);
              if (readbuf)
                regcache_cooked_read (regcache, regnum,
                                      readbuf + i * unit_length);
            }
        }
      return RETURN_VALUE_REGISTER_CONVENTION;
    }

  if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
      || TYPE_CODE (valtype) == TYPE_CODE_UNION
      || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
    {
      if (tdep->struct_return == pcc_struct_return
          || arm_return_in_memory (gdbarch, valtype))
        return RETURN_VALUE_STRUCT_CONVENTION;
    }

  /* AAPCS returns complex types longer than a register in memory.  */
  if (tdep->arm_abi != ARM_ABI_APCS
      && TYPE_CODE (valtype) == TYPE_CODE_COMPLEX
      && TYPE_LENGTH (valtype) > INT_REGISTER_SIZE)
    return RETURN_VALUE_STRUCT_CONVENTION;

  if (writebuf)
    arm_store_return_value (valtype, regcache, writebuf);

  if (readbuf)
    arm_extract_return_value (valtype, regcache, readbuf);

  return RETURN_VALUE_REGISTER_CONVENTION;
}
9264
9265
9266 static int
9267 arm_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
9268 {
9269 struct gdbarch *gdbarch = get_frame_arch (frame);
9270 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
9271 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
9272 CORE_ADDR jb_addr;
9273 gdb_byte buf[INT_REGISTER_SIZE];
9274
9275 jb_addr = get_frame_register_unsigned (frame, ARM_A1_REGNUM);
9276
9277 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
9278 INT_REGISTER_SIZE))
9279 return 0;
9280
9281 *pc = extract_unsigned_integer (buf, INT_REGISTER_SIZE, byte_order);
9282 return 1;
9283 }
9284
/* Recognize GCC and GNU ld's trampolines.  If we are in a trampoline,
   return the target PC.  Otherwise return 0.  Three kinds of stubs
   are handled: bare 'bx reg' trampolines with no symbol, Thumb
   call/return thunks (_call_via_rN), and GNU ld interworking stubs
   (__foo_from_arm / __foo_from_thumb).  */

CORE_ADDR
arm_skip_stub (struct frame_info *frame, CORE_ADDR pc)
{
  const char *name;
  int namelen;
  CORE_ADDR start_addr;

  /* Find the starting address and name of the function containing the PC.  */
  if (find_pc_partial_function (pc, &name, &start_addr, NULL) == 0)
    {
      /* Trampoline 'bx reg' doesn't belong to any functions.  Do the
         check here.  */
      start_addr = arm_skip_bx_reg (frame, pc);
      if (start_addr != 0)
        return start_addr;

      return 0;
    }

  /* If PC is in a Thumb call or return stub, return the address of the
     target PC, which is in a register.  The thunk functions are called
     _call_via_xx, where x is the register name.  The possible names
     are r0-r9, sl, fp, ip, sp, and lr.  ARM RealView has similar
     functions, named __ARM_call_via_r[0-7].  */
  if (strncmp (name, "_call_via_", 10) == 0
      || strncmp (name, "__ARM_call_via_", strlen ("__ARM_call_via_")) == 0)
    {
      /* Use the name suffix to determine which register contains the
         target PC.  Indexed by register number, so TABLE[n] is the
         name of register rN.  */
      static char *table[15] =
      {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
       "r8", "r9", "sl", "fp", "ip", "sp", "lr"
      };
      int regno;
      /* All table entries are two characters, so the suffix starts
         two characters before the end of the name.  */
      int offset = strlen (name) - 2;

      for (regno = 0; regno <= 14; regno++)
        if (strcmp (&name[offset], table[regno]) == 0)
          return get_frame_register_unsigned (frame, regno);
    }

  /* GNU ld generates __foo_from_arm or __foo_from_thumb for
     non-interworking calls to foo.  We could decode the stubs
     to find the target but it's easier to use the symbol table.  */
  namelen = strlen (name);
  if (name[0] == '_' && name[1] == '_'
      && ((namelen > 2 + strlen ("_from_thumb")
           && strncmp (name + namelen - strlen ("_from_thumb"), "_from_thumb",
                       strlen ("_from_thumb")) == 0)
          || (namelen > 2 + strlen ("_from_arm")
              && strncmp (name + namelen - strlen ("_from_arm"), "_from_arm",
                          strlen ("_from_arm")) == 0)))
    {
      char *target_name;
      /* Strip the leading "__"; the suffix is stripped below.  */
      int target_len = namelen - 2;
      struct bound_minimal_symbol minsym;
      struct objfile *objfile;
      struct obj_section *sec;

      /* Distinguish the two suffixes by their final character
         ('b' can only end "_from_thumb" here).  */
      if (name[namelen - 1] == 'b')
        target_len -= strlen ("_from_thumb");
      else
        target_len -= strlen ("_from_arm");

      target_name = alloca (target_len + 1);
      memcpy (target_name, name + 2, target_len);
      target_name[target_len] = '\0';

      /* Prefer a symbol from the same objfile as the stub.  */
      sec = find_pc_section (pc);
      objfile = (sec == NULL) ? NULL : sec->objfile;
      minsym = lookup_minimal_symbol (target_name, NULL, objfile);
      if (minsym.minsym != NULL)
        return BMSYMBOL_VALUE_ADDRESS (minsym);
      else
        return 0;
    }

  return 0;                     /* not a stub */
}
9367
9368 static void
9369 set_arm_command (char *args, int from_tty)
9370 {
9371 printf_unfiltered (_("\
9372 \"set arm\" must be followed by an apporpriate subcommand.\n"));
9373 help_list (setarmcmdlist, "set arm ", all_commands, gdb_stdout);
9374 }
9375
9376 static void
9377 show_arm_command (char *args, int from_tty)
9378 {
9379 cmd_show_list (showarmcmdlist, from_tty, "");
9380 }
9381
9382 static void
9383 arm_update_current_architecture (void)
9384 {
9385 struct gdbarch_info info;
9386
9387 /* If the current architecture is not ARM, we have nothing to do. */
9388 if (gdbarch_bfd_arch_info (target_gdbarch ())->arch != bfd_arch_arm)
9389 return;
9390
9391 /* Update the architecture. */
9392 gdbarch_info_init (&info);
9393
9394 if (!gdbarch_update_p (info))
9395 internal_error (__FILE__, __LINE__, _("could not update architecture"));
9396 }
9397
9398 static void
9399 set_fp_model_sfunc (char *args, int from_tty,
9400 struct cmd_list_element *c)
9401 {
9402 enum arm_float_model fp_model;
9403
9404 for (fp_model = ARM_FLOAT_AUTO; fp_model != ARM_FLOAT_LAST; fp_model++)
9405 if (strcmp (current_fp_model, fp_model_strings[fp_model]) == 0)
9406 {
9407 arm_fp_model = fp_model;
9408 break;
9409 }
9410
9411 if (fp_model == ARM_FLOAT_LAST)
9412 internal_error (__FILE__, __LINE__, _("Invalid fp model accepted: %s."),
9413 current_fp_model);
9414
9415 arm_update_current_architecture ();
9416 }
9417
9418 static void
9419 show_fp_model (struct ui_file *file, int from_tty,
9420 struct cmd_list_element *c, const char *value)
9421 {
9422 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch ());
9423
9424 if (arm_fp_model == ARM_FLOAT_AUTO
9425 && gdbarch_bfd_arch_info (target_gdbarch ())->arch == bfd_arch_arm)
9426 fprintf_filtered (file, _("\
9427 The current ARM floating point model is \"auto\" (currently \"%s\").\n"),
9428 fp_model_strings[tdep->fp_model]);
9429 else
9430 fprintf_filtered (file, _("\
9431 The current ARM floating point model is \"%s\".\n"),
9432 fp_model_strings[arm_fp_model]);
9433 }
9434
9435 static void
9436 arm_set_abi (char *args, int from_tty,
9437 struct cmd_list_element *c)
9438 {
9439 enum arm_abi_kind arm_abi;
9440
9441 for (arm_abi = ARM_ABI_AUTO; arm_abi != ARM_ABI_LAST; arm_abi++)
9442 if (strcmp (arm_abi_string, arm_abi_strings[arm_abi]) == 0)
9443 {
9444 arm_abi_global = arm_abi;
9445 break;
9446 }
9447
9448 if (arm_abi == ARM_ABI_LAST)
9449 internal_error (__FILE__, __LINE__, _("Invalid ABI accepted: %s."),
9450 arm_abi_string);
9451
9452 arm_update_current_architecture ();
9453 }
9454
9455 static void
9456 arm_show_abi (struct ui_file *file, int from_tty,
9457 struct cmd_list_element *c, const char *value)
9458 {
9459 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch ());
9460
9461 if (arm_abi_global == ARM_ABI_AUTO
9462 && gdbarch_bfd_arch_info (target_gdbarch ())->arch == bfd_arch_arm)
9463 fprintf_filtered (file, _("\
9464 The current ARM ABI is \"auto\" (currently \"%s\").\n"),
9465 arm_abi_strings[tdep->arm_abi]);
9466 else
9467 fprintf_filtered (file, _("The current ARM ABI is \"%s\".\n"),
9468 arm_abi_string);
9469 }
9470
9471 static void
9472 arm_show_fallback_mode (struct ui_file *file, int from_tty,
9473 struct cmd_list_element *c, const char *value)
9474 {
9475 fprintf_filtered (file,
9476 _("The current execution mode assumed "
9477 "(when symbols are unavailable) is \"%s\".\n"),
9478 arm_fallback_mode_string);
9479 }
9480
9481 static void
9482 arm_show_force_mode (struct ui_file *file, int from_tty,
9483 struct cmd_list_element *c, const char *value)
9484 {
9485 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch ());
9486
9487 fprintf_filtered (file,
9488 _("The current execution mode assumed "
9489 "(even when symbols are available) is \"%s\".\n"),
9490 arm_force_mode_string);
9491 }
9492
/* If the user changes the register disassembly style used for info
   register and other commands, we have to also switch the style used
   in opcodes for disassembly output.  This sfunc is attached to the
   "set arm disassembler" command and performs that switch.  */

static void
set_disassembly_style_sfunc (char *args, int from_tty,
			     struct cmd_list_element *c)
{
  set_disassembly_style ();
}
9504 \f
9505 /* Return the ARM register name corresponding to register I. */
9506 static const char *
9507 arm_register_name (struct gdbarch *gdbarch, int i)
9508 {
9509 const int num_regs = gdbarch_num_regs (gdbarch);
9510
9511 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
9512 && i >= num_regs && i < num_regs + 32)
9513 {
9514 static const char *const vfp_pseudo_names[] = {
9515 "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
9516 "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
9517 "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
9518 "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
9519 };
9520
9521 return vfp_pseudo_names[i - num_regs];
9522 }
9523
9524 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
9525 && i >= num_regs + 32 && i < num_regs + 32 + 16)
9526 {
9527 static const char *const neon_pseudo_names[] = {
9528 "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
9529 "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
9530 };
9531
9532 return neon_pseudo_names[i - num_regs - 32];
9533 }
9534
9535 if (i >= ARRAY_SIZE (arm_register_names))
9536 /* These registers are only supported on targets which supply
9537 an XML description. */
9538 return "";
9539
9540 return arm_register_names[i];
9541 }
9542
9543 static void
9544 set_disassembly_style (void)
9545 {
9546 int current;
9547
9548 /* Find the style that the user wants. */
9549 for (current = 0; current < num_disassembly_options; current++)
9550 if (disassembly_style == valid_disassembly_styles[current])
9551 break;
9552 gdb_assert (current < num_disassembly_options);
9553
9554 /* Synchronize the disassembler. */
9555 set_arm_regname_option (current);
9556 }
9557
9558 /* Test whether the coff symbol specific value corresponds to a Thumb
9559 function. */
9560
9561 static int
9562 coff_sym_is_thumb (int val)
9563 {
9564 return (val == C_THUMBEXT
9565 || val == C_THUMBSTAT
9566 || val == C_THUMBEXTFUNC
9567 || val == C_THUMBSTATFUNC
9568 || val == C_THUMBLABEL);
9569 }
9570
9571 /* arm_coff_make_msymbol_special()
9572 arm_elf_make_msymbol_special()
9573
9574 These functions test whether the COFF or ELF symbol corresponds to
9575 an address in thumb code, and set a "special" bit in a minimal
9576 symbol to indicate that it does. */
9577
9578 static void
9579 arm_elf_make_msymbol_special(asymbol *sym, struct minimal_symbol *msym)
9580 {
9581 if (ARM_SYM_BRANCH_TYPE (&((elf_symbol_type *)sym)->internal_elf_sym)
9582 == ST_BRANCH_TO_THUMB)
9583 MSYMBOL_SET_SPECIAL (msym);
9584 }
9585
static void
arm_coff_make_msymbol_special(int val, struct minimal_symbol *msym)
{
  /* A COFF Thumb storage class marks a Thumb address; record that in
     the minimal symbol.  */
  if (coff_sym_is_thumb (val))
    MSYMBOL_SET_SPECIAL (msym);
}
9592
9593 static void
9594 arm_objfile_data_free (struct objfile *objfile, void *arg)
9595 {
9596 struct arm_per_objfile *data = arg;
9597 unsigned int i;
9598
9599 for (i = 0; i < objfile->obfd->section_count; i++)
9600 VEC_free (arm_mapping_symbol_s, data->section_maps[i]);
9601 }
9602
/* Record an ARM mapping symbol ($a, $t or $d) in the per-objfile
   sorted per-section vectors, so the ARM/Thumb/data state at an
   address can later be looked up.  Other '$'-prefixed special symbols
   are ignored.  */

static void
arm_record_special_symbol (struct gdbarch *gdbarch, struct objfile *objfile,
			   asymbol *sym)
{
  const char *name = bfd_asymbol_name (sym);
  struct arm_per_objfile *data;
  VEC(arm_mapping_symbol_s) **map_p;
  struct arm_mapping_symbol new_map_sym;

  gdb_assert (name[0] == '$');
  /* Only $a (ARM), $t (Thumb) and $d (data) are mapping symbols.  */
  if (name[1] != 'a' && name[1] != 't' && name[1] != 'd')
    return;

  /* Lazily create the per-objfile data the first time a mapping
     symbol is seen for this objfile.  */
  data = objfile_data (objfile, arm_objfile_data_key);
  if (data == NULL)
    {
      data = OBSTACK_ZALLOC (&objfile->objfile_obstack,
			     struct arm_per_objfile);
      set_objfile_data (objfile, arm_objfile_data_key, data);
      data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
					   objfile->obfd->section_count,
					   VEC(arm_mapping_symbol_s) *);
    }
  map_p = &data->section_maps[bfd_get_section (sym)->index];

  new_map_sym.value = sym->value;
  new_map_sym.type = name[1];

  /* Assume that most mapping symbols appear in order of increasing
     value.  If they were randomly distributed, it would be faster to
     always push here and then sort at first use.  */
  if (!VEC_empty (arm_mapping_symbol_s, *map_p))
    {
      struct arm_mapping_symbol *prev_map_sym;

      prev_map_sym = VEC_last (arm_mapping_symbol_s, *map_p);
      if (prev_map_sym->value >= sym->value)
	{
	  /* Out-of-order symbol: binary-search for its slot so the
	     vector stays sorted.  */
	  unsigned int idx;
	  idx = VEC_lower_bound (arm_mapping_symbol_s, *map_p, &new_map_sym,
				 arm_compare_mapping_symbols);
	  VEC_safe_insert (arm_mapping_symbol_s, *map_p, idx, &new_map_sym);
	  return;
	}
    }

  /* Common case: append at the end.  */
  VEC_safe_push (arm_mapping_symbol_s, *map_p, &new_map_sym);
}
9651
9652 static void
9653 arm_write_pc (struct regcache *regcache, CORE_ADDR pc)
9654 {
9655 struct gdbarch *gdbarch = get_regcache_arch (regcache);
9656 regcache_cooked_write_unsigned (regcache, ARM_PC_REGNUM, pc);
9657
9658 /* If necessary, set the T bit. */
9659 if (arm_apcs_32)
9660 {
9661 ULONGEST val, t_bit;
9662 regcache_cooked_read_unsigned (regcache, ARM_PS_REGNUM, &val);
9663 t_bit = arm_psr_thumb_bit (gdbarch);
9664 if (arm_pc_is_thumb (gdbarch, pc))
9665 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
9666 val | t_bit);
9667 else
9668 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
9669 val & ~t_bit);
9670 }
9671 }
9672
9673 /* Read the contents of a NEON quad register, by reading from two
9674 double registers. This is used to implement the quad pseudo
9675 registers, and for argument passing in case the quad registers are
9676 missing; vectors are passed in quad registers when using the VFP
9677 ABI, even if a NEON unit is not present. REGNUM is the index of
9678 the quad register, in [0, 15]. */
9679
9680 static enum register_status
9681 arm_neon_quad_read (struct gdbarch *gdbarch, struct regcache *regcache,
9682 int regnum, gdb_byte *buf)
9683 {
9684 char name_buf[4];
9685 gdb_byte reg_buf[8];
9686 int offset, double_regnum;
9687 enum register_status status;
9688
9689 xsnprintf (name_buf, sizeof (name_buf), "d%d", regnum << 1);
9690 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
9691 strlen (name_buf));
9692
9693 /* d0 is always the least significant half of q0. */
9694 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
9695 offset = 8;
9696 else
9697 offset = 0;
9698
9699 status = regcache_raw_read (regcache, double_regnum, reg_buf);
9700 if (status != REG_VALID)
9701 return status;
9702 memcpy (buf + offset, reg_buf, 8);
9703
9704 offset = 8 - offset;
9705 status = regcache_raw_read (regcache, double_regnum + 1, reg_buf);
9706 if (status != REG_VALID)
9707 return status;
9708 memcpy (buf + offset, reg_buf, 8);
9709
9710 return REG_VALID;
9711 }
9712
9713 static enum register_status
9714 arm_pseudo_read (struct gdbarch *gdbarch, struct regcache *regcache,
9715 int regnum, gdb_byte *buf)
9716 {
9717 const int num_regs = gdbarch_num_regs (gdbarch);
9718 char name_buf[4];
9719 gdb_byte reg_buf[8];
9720 int offset, double_regnum;
9721
9722 gdb_assert (regnum >= num_regs);
9723 regnum -= num_regs;
9724
9725 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
9726 /* Quad-precision register. */
9727 return arm_neon_quad_read (gdbarch, regcache, regnum - 32, buf);
9728 else
9729 {
9730 enum register_status status;
9731
9732 /* Single-precision register. */
9733 gdb_assert (regnum < 32);
9734
9735 /* s0 is always the least significant half of d0. */
9736 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
9737 offset = (regnum & 1) ? 0 : 4;
9738 else
9739 offset = (regnum & 1) ? 4 : 0;
9740
9741 xsnprintf (name_buf, sizeof (name_buf), "d%d", regnum >> 1);
9742 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
9743 strlen (name_buf));
9744
9745 status = regcache_raw_read (regcache, double_regnum, reg_buf);
9746 if (status == REG_VALID)
9747 memcpy (buf, reg_buf + offset, 4);
9748 return status;
9749 }
9750 }
9751
9752 /* Store the contents of BUF to a NEON quad register, by writing to
9753 two double registers. This is used to implement the quad pseudo
9754 registers, and for argument passing in case the quad registers are
9755 missing; vectors are passed in quad registers when using the VFP
9756 ABI, even if a NEON unit is not present. REGNUM is the index
9757 of the quad register, in [0, 15]. */
9758
9759 static void
9760 arm_neon_quad_write (struct gdbarch *gdbarch, struct regcache *regcache,
9761 int regnum, const gdb_byte *buf)
9762 {
9763 char name_buf[4];
9764 int offset, double_regnum;
9765
9766 xsnprintf (name_buf, sizeof (name_buf), "d%d", regnum << 1);
9767 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
9768 strlen (name_buf));
9769
9770 /* d0 is always the least significant half of q0. */
9771 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
9772 offset = 8;
9773 else
9774 offset = 0;
9775
9776 regcache_raw_write (regcache, double_regnum, buf + offset);
9777 offset = 8 - offset;
9778 regcache_raw_write (regcache, double_regnum + 1, buf + offset);
9779 }
9780
9781 static void
9782 arm_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
9783 int regnum, const gdb_byte *buf)
9784 {
9785 const int num_regs = gdbarch_num_regs (gdbarch);
9786 char name_buf[4];
9787 gdb_byte reg_buf[8];
9788 int offset, double_regnum;
9789
9790 gdb_assert (regnum >= num_regs);
9791 regnum -= num_regs;
9792
9793 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
9794 /* Quad-precision register. */
9795 arm_neon_quad_write (gdbarch, regcache, regnum - 32, buf);
9796 else
9797 {
9798 /* Single-precision register. */
9799 gdb_assert (regnum < 32);
9800
9801 /* s0 is always the least significant half of d0. */
9802 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
9803 offset = (regnum & 1) ? 0 : 4;
9804 else
9805 offset = (regnum & 1) ? 4 : 0;
9806
9807 xsnprintf (name_buf, sizeof (name_buf), "d%d", regnum >> 1);
9808 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
9809 strlen (name_buf));
9810
9811 regcache_raw_read (regcache, double_regnum, reg_buf);
9812 memcpy (reg_buf + offset, buf, 4);
9813 regcache_raw_write (regcache, double_regnum, reg_buf);
9814 }
9815 }
9816
/* user_reg callback: BATON points at the register number to read.  */

static struct value *
value_of_arm_user_reg (struct frame_info *frame, const void *baton)
{
  const int *regnum_p = baton;

  return value_of_register (*regnum_p, frame);
}
9823 \f
9824 static enum gdb_osabi
9825 arm_elf_osabi_sniffer (bfd *abfd)
9826 {
9827 unsigned int elfosabi;
9828 enum gdb_osabi osabi = GDB_OSABI_UNKNOWN;
9829
9830 elfosabi = elf_elfheader (abfd)->e_ident[EI_OSABI];
9831
9832 if (elfosabi == ELFOSABI_ARM)
9833 /* GNU tools use this value. Check note sections in this case,
9834 as well. */
9835 bfd_map_over_sections (abfd,
9836 generic_elf_osabi_sniff_abi_tag_sections,
9837 &osabi);
9838
9839 /* Anything else will be handled by the generic ELF sniffer. */
9840 return osabi;
9841 }
9842
9843 static int
9844 arm_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
9845 struct reggroup *group)
9846 {
9847 /* FPS register's type is INT, but belongs to float_reggroup. Beside
9848 this, FPS register belongs to save_regroup, restore_reggroup, and
9849 all_reggroup, of course. */
9850 if (regnum == ARM_FPS_REGNUM)
9851 return (group == float_reggroup
9852 || group == save_reggroup
9853 || group == restore_reggroup
9854 || group == all_reggroup);
9855 else
9856 return default_register_reggroup_p (gdbarch, regnum, group);
9857 }
9858
9859 \f
9860 /* For backward-compatibility we allow two 'g' packet lengths with
9861 the remote protocol depending on whether FPA registers are
9862 supplied. M-profile targets do not have FPA registers, but some
9863 stubs already exist in the wild which use a 'g' packet which
9864 supplies them albeit with dummy values. The packet format which
9865 includes FPA registers should be considered deprecated for
9866 M-profile targets. */
9867
9868 static void
9869 arm_register_g_packet_guesses (struct gdbarch *gdbarch)
9870 {
9871 if (gdbarch_tdep (gdbarch)->is_m)
9872 {
9873 /* If we know from the executable this is an M-profile target,
9874 cater for remote targets whose register set layout is the
9875 same as the FPA layout. */
9876 register_remote_g_packet_guess (gdbarch,
9877 /* r0-r12,sp,lr,pc; f0-f7; fps,xpsr */
9878 (16 * INT_REGISTER_SIZE)
9879 + (8 * FP_REGISTER_SIZE)
9880 + (2 * INT_REGISTER_SIZE),
9881 tdesc_arm_with_m_fpa_layout);
9882
9883 /* The regular M-profile layout. */
9884 register_remote_g_packet_guess (gdbarch,
9885 /* r0-r12,sp,lr,pc; xpsr */
9886 (16 * INT_REGISTER_SIZE)
9887 + INT_REGISTER_SIZE,
9888 tdesc_arm_with_m);
9889
9890 /* M-profile plus M4F VFP. */
9891 register_remote_g_packet_guess (gdbarch,
9892 /* r0-r12,sp,lr,pc; d0-d15; fpscr,xpsr */
9893 (16 * INT_REGISTER_SIZE)
9894 + (16 * VFP_REGISTER_SIZE)
9895 + (2 * INT_REGISTER_SIZE),
9896 tdesc_arm_with_m_vfp_d16);
9897 }
9898
9899 /* Otherwise we don't have a useful guess. */
9900 }
9901
9902 \f
9903 /* Initialize the current architecture based on INFO. If possible,
9904 re-use an architecture from ARCHES, which is a list of
9905 architectures already created during this debugging session.
9906
9907 Called e.g. at program startup, when reading a core file, and when
9908 reading a binary file. */
9909
9910 static struct gdbarch *
9911 arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
9912 {
9913 struct gdbarch_tdep *tdep;
9914 struct gdbarch *gdbarch;
9915 struct gdbarch_list *best_arch;
9916 enum arm_abi_kind arm_abi = arm_abi_global;
9917 enum arm_float_model fp_model = arm_fp_model;
9918 struct tdesc_arch_data *tdesc_data = NULL;
9919 int i, is_m = 0;
9920 int have_vfp_registers = 0, have_vfp_pseudos = 0, have_neon_pseudos = 0;
9921 int have_neon = 0;
9922 int have_fpa_registers = 1;
9923 const struct target_desc *tdesc = info.target_desc;
9924
9925 /* If we have an object to base this architecture on, try to determine
9926 its ABI. */
9927
9928 if (arm_abi == ARM_ABI_AUTO && info.abfd != NULL)
9929 {
9930 int ei_osabi, e_flags;
9931
9932 switch (bfd_get_flavour (info.abfd))
9933 {
9934 case bfd_target_aout_flavour:
9935 /* Assume it's an old APCS-style ABI. */
9936 arm_abi = ARM_ABI_APCS;
9937 break;
9938
9939 case bfd_target_coff_flavour:
9940 /* Assume it's an old APCS-style ABI. */
9941 /* XXX WinCE? */
9942 arm_abi = ARM_ABI_APCS;
9943 break;
9944
9945 case bfd_target_elf_flavour:
9946 ei_osabi = elf_elfheader (info.abfd)->e_ident[EI_OSABI];
9947 e_flags = elf_elfheader (info.abfd)->e_flags;
9948
9949 if (ei_osabi == ELFOSABI_ARM)
9950 {
9951 /* GNU tools used to use this value, but do not for EABI
9952 objects. There's nowhere to tag an EABI version
9953 anyway, so assume APCS. */
9954 arm_abi = ARM_ABI_APCS;
9955 }
9956 else if (ei_osabi == ELFOSABI_NONE)
9957 {
9958 int eabi_ver = EF_ARM_EABI_VERSION (e_flags);
9959 int attr_arch, attr_profile;
9960
9961 switch (eabi_ver)
9962 {
9963 case EF_ARM_EABI_UNKNOWN:
9964 /* Assume GNU tools. */
9965 arm_abi = ARM_ABI_APCS;
9966 break;
9967
9968 case EF_ARM_EABI_VER4:
9969 case EF_ARM_EABI_VER5:
9970 arm_abi = ARM_ABI_AAPCS;
9971 /* EABI binaries default to VFP float ordering.
9972 They may also contain build attributes that can
9973 be used to identify if the VFP argument-passing
9974 ABI is in use. */
9975 if (fp_model == ARM_FLOAT_AUTO)
9976 {
9977 #ifdef HAVE_ELF
9978 switch (bfd_elf_get_obj_attr_int (info.abfd,
9979 OBJ_ATTR_PROC,
9980 Tag_ABI_VFP_args))
9981 {
9982 case 0:
9983 /* "The user intended FP parameter/result
9984 passing to conform to AAPCS, base
9985 variant". */
9986 fp_model = ARM_FLOAT_SOFT_VFP;
9987 break;
9988 case 1:
9989 /* "The user intended FP parameter/result
9990 passing to conform to AAPCS, VFP
9991 variant". */
9992 fp_model = ARM_FLOAT_VFP;
9993 break;
9994 case 2:
9995 /* "The user intended FP parameter/result
9996 passing to conform to tool chain-specific
9997 conventions" - we don't know any such
9998 conventions, so leave it as "auto". */
9999 break;
10000 default:
10001 /* Attribute value not mentioned in the
10002 October 2008 ABI, so leave it as
10003 "auto". */
10004 break;
10005 }
10006 #else
10007 fp_model = ARM_FLOAT_SOFT_VFP;
10008 #endif
10009 }
10010 break;
10011
10012 default:
10013 /* Leave it as "auto". */
10014 warning (_("unknown ARM EABI version 0x%x"), eabi_ver);
10015 break;
10016 }
10017
10018 #ifdef HAVE_ELF
10019 /* Detect M-profile programs. This only works if the
10020 executable file includes build attributes; GCC does
10021 copy them to the executable, but e.g. RealView does
10022 not. */
10023 attr_arch = bfd_elf_get_obj_attr_int (info.abfd, OBJ_ATTR_PROC,
10024 Tag_CPU_arch);
10025 attr_profile = bfd_elf_get_obj_attr_int (info.abfd,
10026 OBJ_ATTR_PROC,
10027 Tag_CPU_arch_profile);
10028 /* GCC specifies the profile for v6-M; RealView only
10029 specifies the profile for architectures starting with
10030 V7 (as opposed to architectures with a tag
10031 numerically greater than TAG_CPU_ARCH_V7). */
10032 if (!tdesc_has_registers (tdesc)
10033 && (attr_arch == TAG_CPU_ARCH_V6_M
10034 || attr_arch == TAG_CPU_ARCH_V6S_M
10035 || attr_profile == 'M'))
10036 is_m = 1;
10037 #endif
10038 }
10039
10040 if (fp_model == ARM_FLOAT_AUTO)
10041 {
10042 int e_flags = elf_elfheader (info.abfd)->e_flags;
10043
10044 switch (e_flags & (EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT))
10045 {
10046 case 0:
10047 /* Leave it as "auto". Strictly speaking this case
10048 means FPA, but almost nobody uses that now, and
10049 many toolchains fail to set the appropriate bits
10050 for the floating-point model they use. */
10051 break;
10052 case EF_ARM_SOFT_FLOAT:
10053 fp_model = ARM_FLOAT_SOFT_FPA;
10054 break;
10055 case EF_ARM_VFP_FLOAT:
10056 fp_model = ARM_FLOAT_VFP;
10057 break;
10058 case EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT:
10059 fp_model = ARM_FLOAT_SOFT_VFP;
10060 break;
10061 }
10062 }
10063
10064 if (e_flags & EF_ARM_BE8)
10065 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
10066
10067 break;
10068
10069 default:
10070 /* Leave it as "auto". */
10071 break;
10072 }
10073 }
10074
10075 /* Check any target description for validity. */
10076 if (tdesc_has_registers (tdesc))
10077 {
10078 /* For most registers we require GDB's default names; but also allow
10079 the numeric names for sp / lr / pc, as a convenience. */
10080 static const char *const arm_sp_names[] = { "r13", "sp", NULL };
10081 static const char *const arm_lr_names[] = { "r14", "lr", NULL };
10082 static const char *const arm_pc_names[] = { "r15", "pc", NULL };
10083
10084 const struct tdesc_feature *feature;
10085 int valid_p;
10086
10087 feature = tdesc_find_feature (tdesc,
10088 "org.gnu.gdb.arm.core");
10089 if (feature == NULL)
10090 {
10091 feature = tdesc_find_feature (tdesc,
10092 "org.gnu.gdb.arm.m-profile");
10093 if (feature == NULL)
10094 return NULL;
10095 else
10096 is_m = 1;
10097 }
10098
10099 tdesc_data = tdesc_data_alloc ();
10100
10101 valid_p = 1;
10102 for (i = 0; i < ARM_SP_REGNUM; i++)
10103 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
10104 arm_register_names[i]);
10105 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
10106 ARM_SP_REGNUM,
10107 arm_sp_names);
10108 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
10109 ARM_LR_REGNUM,
10110 arm_lr_names);
10111 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
10112 ARM_PC_REGNUM,
10113 arm_pc_names);
10114 if (is_m)
10115 valid_p &= tdesc_numbered_register (feature, tdesc_data,
10116 ARM_PS_REGNUM, "xpsr");
10117 else
10118 valid_p &= tdesc_numbered_register (feature, tdesc_data,
10119 ARM_PS_REGNUM, "cpsr");
10120
10121 if (!valid_p)
10122 {
10123 tdesc_data_cleanup (tdesc_data);
10124 return NULL;
10125 }
10126
10127 feature = tdesc_find_feature (tdesc,
10128 "org.gnu.gdb.arm.fpa");
10129 if (feature != NULL)
10130 {
10131 valid_p = 1;
10132 for (i = ARM_F0_REGNUM; i <= ARM_FPS_REGNUM; i++)
10133 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
10134 arm_register_names[i]);
10135 if (!valid_p)
10136 {
10137 tdesc_data_cleanup (tdesc_data);
10138 return NULL;
10139 }
10140 }
10141 else
10142 have_fpa_registers = 0;
10143
10144 feature = tdesc_find_feature (tdesc,
10145 "org.gnu.gdb.xscale.iwmmxt");
10146 if (feature != NULL)
10147 {
10148 static const char *const iwmmxt_names[] = {
10149 "wR0", "wR1", "wR2", "wR3", "wR4", "wR5", "wR6", "wR7",
10150 "wR8", "wR9", "wR10", "wR11", "wR12", "wR13", "wR14", "wR15",
10151 "wCID", "wCon", "wCSSF", "wCASF", "", "", "", "",
10152 "wCGR0", "wCGR1", "wCGR2", "wCGR3", "", "", "", "",
10153 };
10154
10155 valid_p = 1;
10156 for (i = ARM_WR0_REGNUM; i <= ARM_WR15_REGNUM; i++)
10157 valid_p
10158 &= tdesc_numbered_register (feature, tdesc_data, i,
10159 iwmmxt_names[i - ARM_WR0_REGNUM]);
10160
10161 /* Check for the control registers, but do not fail if they
10162 are missing. */
10163 for (i = ARM_WC0_REGNUM; i <= ARM_WCASF_REGNUM; i++)
10164 tdesc_numbered_register (feature, tdesc_data, i,
10165 iwmmxt_names[i - ARM_WR0_REGNUM]);
10166
10167 for (i = ARM_WCGR0_REGNUM; i <= ARM_WCGR3_REGNUM; i++)
10168 valid_p
10169 &= tdesc_numbered_register (feature, tdesc_data, i,
10170 iwmmxt_names[i - ARM_WR0_REGNUM]);
10171
10172 if (!valid_p)
10173 {
10174 tdesc_data_cleanup (tdesc_data);
10175 return NULL;
10176 }
10177 }
10178
10179 /* If we have a VFP unit, check whether the single precision registers
10180 are present. If not, then we will synthesize them as pseudo
10181 registers. */
10182 feature = tdesc_find_feature (tdesc,
10183 "org.gnu.gdb.arm.vfp");
10184 if (feature != NULL)
10185 {
10186 static const char *const vfp_double_names[] = {
10187 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
10188 "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
10189 "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
10190 "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
10191 };
10192
10193 /* Require the double precision registers. There must be either
10194 16 or 32. */
10195 valid_p = 1;
10196 for (i = 0; i < 32; i++)
10197 {
10198 valid_p &= tdesc_numbered_register (feature, tdesc_data,
10199 ARM_D0_REGNUM + i,
10200 vfp_double_names[i]);
10201 if (!valid_p)
10202 break;
10203 }
10204 if (!valid_p && i == 16)
10205 valid_p = 1;
10206
10207 /* Also require FPSCR. */
10208 valid_p &= tdesc_numbered_register (feature, tdesc_data,
10209 ARM_FPSCR_REGNUM, "fpscr");
10210 if (!valid_p)
10211 {
10212 tdesc_data_cleanup (tdesc_data);
10213 return NULL;
10214 }
10215
10216 if (tdesc_unnumbered_register (feature, "s0") == 0)
10217 have_vfp_pseudos = 1;
10218
10219 have_vfp_registers = 1;
10220
10221 /* If we have VFP, also check for NEON. The architecture allows
10222 NEON without VFP (integer vector operations only), but GDB
10223 does not support that. */
10224 feature = tdesc_find_feature (tdesc,
10225 "org.gnu.gdb.arm.neon");
10226 if (feature != NULL)
10227 {
10228 /* NEON requires 32 double-precision registers. */
10229 if (i != 32)
10230 {
10231 tdesc_data_cleanup (tdesc_data);
10232 return NULL;
10233 }
10234
10235 /* If there are quad registers defined by the stub, use
10236 their type; otherwise (normally) provide them with
10237 the default type. */
10238 if (tdesc_unnumbered_register (feature, "q0") == 0)
10239 have_neon_pseudos = 1;
10240
10241 have_neon = 1;
10242 }
10243 }
10244 }
10245
10246 /* If there is already a candidate, use it. */
10247 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
10248 best_arch != NULL;
10249 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
10250 {
10251 if (arm_abi != ARM_ABI_AUTO
10252 && arm_abi != gdbarch_tdep (best_arch->gdbarch)->arm_abi)
10253 continue;
10254
10255 if (fp_model != ARM_FLOAT_AUTO
10256 && fp_model != gdbarch_tdep (best_arch->gdbarch)->fp_model)
10257 continue;
10258
10259 /* There are various other properties in tdep that we do not
10260 need to check here: those derived from a target description,
10261 since gdbarches with a different target description are
10262 automatically disqualified. */
10263
10264 /* Do check is_m, though, since it might come from the binary. */
10265 if (is_m != gdbarch_tdep (best_arch->gdbarch)->is_m)
10266 continue;
10267
10268 /* Found a match. */
10269 break;
10270 }
10271
10272 if (best_arch != NULL)
10273 {
10274 if (tdesc_data != NULL)
10275 tdesc_data_cleanup (tdesc_data);
10276 return best_arch->gdbarch;
10277 }
10278
10279 tdep = xcalloc (1, sizeof (struct gdbarch_tdep));
10280 gdbarch = gdbarch_alloc (&info, tdep);
10281
10282 /* Record additional information about the architecture we are defining.
10283 These are gdbarch discriminators, like the OSABI. */
10284 tdep->arm_abi = arm_abi;
10285 tdep->fp_model = fp_model;
10286 tdep->is_m = is_m;
10287 tdep->have_fpa_registers = have_fpa_registers;
10288 tdep->have_vfp_registers = have_vfp_registers;
10289 tdep->have_vfp_pseudos = have_vfp_pseudos;
10290 tdep->have_neon_pseudos = have_neon_pseudos;
10291 tdep->have_neon = have_neon;
10292
10293 arm_register_g_packet_guesses (gdbarch);
10294
10295 /* Breakpoints. */
10296 switch (info.byte_order_for_code)
10297 {
10298 case BFD_ENDIAN_BIG:
10299 tdep->arm_breakpoint = arm_default_arm_be_breakpoint;
10300 tdep->arm_breakpoint_size = sizeof (arm_default_arm_be_breakpoint);
10301 tdep->thumb_breakpoint = arm_default_thumb_be_breakpoint;
10302 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_be_breakpoint);
10303
10304 break;
10305
10306 case BFD_ENDIAN_LITTLE:
10307 tdep->arm_breakpoint = arm_default_arm_le_breakpoint;
10308 tdep->arm_breakpoint_size = sizeof (arm_default_arm_le_breakpoint);
10309 tdep->thumb_breakpoint = arm_default_thumb_le_breakpoint;
10310 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_le_breakpoint);
10311
10312 break;
10313
10314 default:
10315 internal_error (__FILE__, __LINE__,
10316 _("arm_gdbarch_init: bad byte order for float format"));
10317 }
10318
10319 /* On ARM targets char defaults to unsigned. */
10320 set_gdbarch_char_signed (gdbarch, 0);
10321
10322 /* Note: for displaced stepping, this includes the breakpoint, and one word
10323 of additional scratch space. This setting isn't used for anything beside
10324 displaced stepping at present. */
10325 set_gdbarch_max_insn_length (gdbarch, 4 * DISPLACED_MODIFIED_INSNS);
10326
10327 /* This should be low enough for everything. */
10328 tdep->lowest_pc = 0x20;
10329 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
10330
10331 /* The default, for both APCS and AAPCS, is to return small
10332 structures in registers. */
10333 tdep->struct_return = reg_struct_return;
10334
10335 set_gdbarch_push_dummy_call (gdbarch, arm_push_dummy_call);
10336 set_gdbarch_frame_align (gdbarch, arm_frame_align);
10337
10338 set_gdbarch_write_pc (gdbarch, arm_write_pc);
10339
10340 /* Frame handling. */
10341 set_gdbarch_dummy_id (gdbarch, arm_dummy_id);
10342 set_gdbarch_unwind_pc (gdbarch, arm_unwind_pc);
10343 set_gdbarch_unwind_sp (gdbarch, arm_unwind_sp);
10344
10345 frame_base_set_default (gdbarch, &arm_normal_base);
10346
10347 /* Address manipulation. */
10348 set_gdbarch_addr_bits_remove (gdbarch, arm_addr_bits_remove);
10349
10350 /* Advance PC across function entry code. */
10351 set_gdbarch_skip_prologue (gdbarch, arm_skip_prologue);
10352
10353 /* Detect whether PC is in function epilogue. */
10354 set_gdbarch_in_function_epilogue_p (gdbarch, arm_in_function_epilogue_p);
10355
10356 /* Skip trampolines. */
10357 set_gdbarch_skip_trampoline_code (gdbarch, arm_skip_stub);
10358
10359 /* The stack grows downward. */
10360 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
10361
10362 /* Breakpoint manipulation. */
10363 set_gdbarch_breakpoint_from_pc (gdbarch, arm_breakpoint_from_pc);
10364 set_gdbarch_remote_breakpoint_from_pc (gdbarch,
10365 arm_remote_breakpoint_from_pc);
10366
10367 /* Information about registers, etc. */
10368 set_gdbarch_sp_regnum (gdbarch, ARM_SP_REGNUM);
10369 set_gdbarch_pc_regnum (gdbarch, ARM_PC_REGNUM);
10370 set_gdbarch_num_regs (gdbarch, ARM_NUM_REGS);
10371 set_gdbarch_register_type (gdbarch, arm_register_type);
10372 set_gdbarch_register_reggroup_p (gdbarch, arm_register_reggroup_p);
10373
10374 /* This "info float" is FPA-specific. Use the generic version if we
10375 do not have FPA. */
10376 if (gdbarch_tdep (gdbarch)->have_fpa_registers)
10377 set_gdbarch_print_float_info (gdbarch, arm_print_float_info);
10378
10379 /* Internal <-> external register number maps. */
10380 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, arm_dwarf_reg_to_regnum);
10381 set_gdbarch_register_sim_regno (gdbarch, arm_register_sim_regno);
10382
10383 set_gdbarch_register_name (gdbarch, arm_register_name);
10384
10385 /* Returning results. */
10386 set_gdbarch_return_value (gdbarch, arm_return_value);
10387
10388 /* Disassembly. */
10389 set_gdbarch_print_insn (gdbarch, gdb_print_insn_arm);
10390
10391 /* Minsymbol frobbing. */
10392 set_gdbarch_elf_make_msymbol_special (gdbarch, arm_elf_make_msymbol_special);
10393 set_gdbarch_coff_make_msymbol_special (gdbarch,
10394 arm_coff_make_msymbol_special);
10395 set_gdbarch_record_special_symbol (gdbarch, arm_record_special_symbol);
10396
10397 /* Thumb-2 IT block support. */
10398 set_gdbarch_adjust_breakpoint_address (gdbarch,
10399 arm_adjust_breakpoint_address);
10400
10401 /* Virtual tables. */
10402 set_gdbarch_vbit_in_delta (gdbarch, 1);
10403
10404 /* Hook in the ABI-specific overrides, if they have been registered. */
10405 gdbarch_init_osabi (info, gdbarch);
10406
10407 dwarf2_frame_set_init_reg (gdbarch, arm_dwarf2_frame_init_reg);
10408
10409 /* Add some default predicates. */
10410 if (is_m)
10411 frame_unwind_append_unwinder (gdbarch, &arm_m_exception_unwind);
10412 frame_unwind_append_unwinder (gdbarch, &arm_stub_unwind);
10413 dwarf2_append_unwinders (gdbarch);
10414 frame_unwind_append_unwinder (gdbarch, &arm_exidx_unwind);
10415 frame_unwind_append_unwinder (gdbarch, &arm_prologue_unwind);
10416
10417 /* Now we have tuned the configuration, set a few final things,
10418 based on what the OS ABI has told us. */
10419
10420 /* If the ABI is not otherwise marked, assume the old GNU APCS. EABI
10421 binaries are always marked. */
10422 if (tdep->arm_abi == ARM_ABI_AUTO)
10423 tdep->arm_abi = ARM_ABI_APCS;
10424
10425 /* Watchpoints are not steppable. */
10426 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
10427
10428 /* We used to default to FPA for generic ARM, but almost nobody
10429 uses that now, and we now provide a way for the user to force
10430 the model. So default to the most useful variant. */
10431 if (tdep->fp_model == ARM_FLOAT_AUTO)
10432 tdep->fp_model = ARM_FLOAT_SOFT_FPA;
10433
10434 if (tdep->jb_pc >= 0)
10435 set_gdbarch_get_longjmp_target (gdbarch, arm_get_longjmp_target);
10436
10437 /* Floating point sizes and format. */
10438 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
10439 if (tdep->fp_model == ARM_FLOAT_SOFT_FPA || tdep->fp_model == ARM_FLOAT_FPA)
10440 {
10441 set_gdbarch_double_format
10442 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
10443 set_gdbarch_long_double_format
10444 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
10445 }
10446 else
10447 {
10448 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
10449 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
10450 }
10451
10452 if (have_vfp_pseudos)
10453 {
10454 /* NOTE: These are the only pseudo registers used by
10455 the ARM target at the moment. If more are added, a
10456 little more care in numbering will be needed. */
10457
10458 int num_pseudos = 32;
10459 if (have_neon_pseudos)
10460 num_pseudos += 16;
10461 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudos);
10462 set_gdbarch_pseudo_register_read (gdbarch, arm_pseudo_read);
10463 set_gdbarch_pseudo_register_write (gdbarch, arm_pseudo_write);
10464 }
10465
10466 if (tdesc_data)
10467 {
10468 set_tdesc_pseudo_register_name (gdbarch, arm_register_name);
10469
10470 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
10471
10472 /* Override tdesc_register_type to adjust the types of VFP
10473 registers for NEON. */
10474 set_gdbarch_register_type (gdbarch, arm_register_type);
10475 }
10476
10477 /* Add standard register aliases. We add aliases even for those
10478 nanes which are used by the current architecture - it's simpler,
10479 and does no harm, since nothing ever lists user registers. */
10480 for (i = 0; i < ARRAY_SIZE (arm_register_aliases); i++)
10481 user_reg_add (gdbarch, arm_register_aliases[i].name,
10482 value_of_arm_user_reg, &arm_register_aliases[i].regnum);
10483
10484 return gdbarch;
10485 }
10486
10487 static void
10488 arm_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
10489 {
10490 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
10491
10492 if (tdep == NULL)
10493 return;
10494
10495 fprintf_unfiltered (file, _("arm_dump_tdep: Lowest pc = 0x%lx"),
10496 (unsigned long) tdep->lowest_pc);
10497 }
10498
10499 extern initialize_file_ftype _initialize_arm_tdep; /* -Wmissing-prototypes */
10500
10501 void
10502 _initialize_arm_tdep (void)
10503 {
10504 struct ui_file *stb;
10505 long length;
10506 struct cmd_list_element *new_set, *new_show;
10507 const char *setname;
10508 const char *setdesc;
10509 const char *const *regnames;
10510 int numregs, i, j;
10511 static char *helptext;
10512 char regdesc[1024], *rdptr = regdesc;
10513 size_t rest = sizeof (regdesc);
10514
10515 gdbarch_register (bfd_arch_arm, arm_gdbarch_init, arm_dump_tdep);
10516
10517 arm_objfile_data_key
10518 = register_objfile_data_with_cleanup (NULL, arm_objfile_data_free);
10519
10520 /* Add ourselves to objfile event chain. */
10521 observer_attach_new_objfile (arm_exidx_new_objfile);
10522 arm_exidx_data_key
10523 = register_objfile_data_with_cleanup (NULL, arm_exidx_data_free);
10524
10525 /* Register an ELF OS ABI sniffer for ARM binaries. */
10526 gdbarch_register_osabi_sniffer (bfd_arch_arm,
10527 bfd_target_elf_flavour,
10528 arm_elf_osabi_sniffer);
10529
10530 /* Initialize the standard target descriptions. */
10531 initialize_tdesc_arm_with_m ();
10532 initialize_tdesc_arm_with_m_fpa_layout ();
10533 initialize_tdesc_arm_with_m_vfp_d16 ();
10534 initialize_tdesc_arm_with_iwmmxt ();
10535 initialize_tdesc_arm_with_vfpv2 ();
10536 initialize_tdesc_arm_with_vfpv3 ();
10537 initialize_tdesc_arm_with_neon ();
10538
10539 /* Get the number of possible sets of register names defined in opcodes. */
10540 num_disassembly_options = get_arm_regname_num_options ();
10541
10542 /* Add root prefix command for all "set arm"/"show arm" commands. */
10543 add_prefix_cmd ("arm", no_class, set_arm_command,
10544 _("Various ARM-specific commands."),
10545 &setarmcmdlist, "set arm ", 0, &setlist);
10546
10547 add_prefix_cmd ("arm", no_class, show_arm_command,
10548 _("Various ARM-specific commands."),
10549 &showarmcmdlist, "show arm ", 0, &showlist);
10550
10551 /* Sync the opcode insn printer with our register viewer. */
10552 parse_arm_disassembler_option ("reg-names-std");
10553
10554 /* Initialize the array that will be passed to
10555 add_setshow_enum_cmd(). */
10556 valid_disassembly_styles
10557 = xmalloc ((num_disassembly_options + 1) * sizeof (char *));
10558 for (i = 0; i < num_disassembly_options; i++)
10559 {
10560 numregs = get_arm_regnames (i, &setname, &setdesc, &regnames);
10561 valid_disassembly_styles[i] = setname;
10562 length = snprintf (rdptr, rest, "%s - %s\n", setname, setdesc);
10563 rdptr += length;
10564 rest -= length;
10565 /* When we find the default names, tell the disassembler to use
10566 them. */
10567 if (!strcmp (setname, "std"))
10568 {
10569 disassembly_style = setname;
10570 set_arm_regname_option (i);
10571 }
10572 }
10573 /* Mark the end of valid options. */
10574 valid_disassembly_styles[num_disassembly_options] = NULL;
10575
10576 /* Create the help text. */
10577 stb = mem_fileopen ();
10578 fprintf_unfiltered (stb, "%s%s%s",
10579 _("The valid values are:\n"),
10580 regdesc,
10581 _("The default is \"std\"."));
10582 helptext = ui_file_xstrdup (stb, NULL);
10583 ui_file_delete (stb);
10584
10585 add_setshow_enum_cmd("disassembler", no_class,
10586 valid_disassembly_styles, &disassembly_style,
10587 _("Set the disassembly style."),
10588 _("Show the disassembly style."),
10589 helptext,
10590 set_disassembly_style_sfunc,
10591 NULL, /* FIXME: i18n: The disassembly style is
10592 \"%s\". */
10593 &setarmcmdlist, &showarmcmdlist);
10594
10595 add_setshow_boolean_cmd ("apcs32", no_class, &arm_apcs_32,
10596 _("Set usage of ARM 32-bit mode."),
10597 _("Show usage of ARM 32-bit mode."),
10598 _("When off, a 26-bit PC will be used."),
10599 NULL,
10600 NULL, /* FIXME: i18n: Usage of ARM 32-bit
10601 mode is %s. */
10602 &setarmcmdlist, &showarmcmdlist);
10603
10604 /* Add a command to allow the user to force the FPU model. */
10605 add_setshow_enum_cmd ("fpu", no_class, fp_model_strings, &current_fp_model,
10606 _("Set the floating point type."),
10607 _("Show the floating point type."),
10608 _("auto - Determine the FP typefrom the OS-ABI.\n\
10609 softfpa - Software FP, mixed-endian doubles on little-endian ARMs.\n\
10610 fpa - FPA co-processor (GCC compiled).\n\
10611 softvfp - Software FP with pure-endian doubles.\n\
10612 vfp - VFP co-processor."),
10613 set_fp_model_sfunc, show_fp_model,
10614 &setarmcmdlist, &showarmcmdlist);
10615
10616 /* Add a command to allow the user to force the ABI. */
10617 add_setshow_enum_cmd ("abi", class_support, arm_abi_strings, &arm_abi_string,
10618 _("Set the ABI."),
10619 _("Show the ABI."),
10620 NULL, arm_set_abi, arm_show_abi,
10621 &setarmcmdlist, &showarmcmdlist);
10622
10623 /* Add two commands to allow the user to force the assumed
10624 execution mode. */
10625 add_setshow_enum_cmd ("fallback-mode", class_support,
10626 arm_mode_strings, &arm_fallback_mode_string,
10627 _("Set the mode assumed when symbols are unavailable."),
10628 _("Show the mode assumed when symbols are unavailable."),
10629 NULL, NULL, arm_show_fallback_mode,
10630 &setarmcmdlist, &showarmcmdlist);
10631 add_setshow_enum_cmd ("force-mode", class_support,
10632 arm_mode_strings, &arm_force_mode_string,
10633 _("Set the mode assumed even when symbols are available."),
10634 _("Show the mode assumed even when symbols are available."),
10635 NULL, NULL, arm_show_force_mode,
10636 &setarmcmdlist, &showarmcmdlist);
10637
10638 /* Debugging flag. */
10639 add_setshow_boolean_cmd ("arm", class_maintenance, &arm_debug,
10640 _("Set ARM debugging."),
10641 _("Show ARM debugging."),
10642 _("When on, arm-specific debugging is enabled."),
10643 NULL,
10644 NULL, /* FIXME: i18n: "ARM debugging is %s. */
10645 &setdebuglist, &showdebuglist);
10646 }
10647
/* ARM-reversible process record data structures. */

/* Instruction sizes, in bytes, for the three encodings the recorder
   decodes.  */
#define ARM_INSN_SIZE_BYTES 4
#define THUMB_INSN_SIZE_BYTES 2
#define THUMB2_INSN_SIZE_BYTES 4


/* Bit number of the S/L (store/load) selector bit in ARM load/store
   instruction encodings.  */
#define INSN_S_L_BIT_NUM 20

/* Allocate REGS as a fresh array of LENGTH uint32_t register numbers
   copied from RECORD_BUF; does nothing when LENGTH is zero.
   NOTE(review): LENGTH is expanded twice, so pass only side-effect-free
   arguments.  Ownership of the allocation passes to the caller.  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int reg_len = LENGTH; \
            if (reg_len) \
              { \
                REGS = XNEWVEC (uint32_t, reg_len); \
                memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
              } \
          } \
        while (0)

/* Allocate MEMS as a fresh array of LENGTH struct arm_mem_r records
   copied from RECORD_BUF; does nothing when LENGTH is zero.  The copy
   goes through &MEMS->len (the first member), treating RECORD_BUF as
   raw <len, addr> uint32_t pairs -- so it depends on arm_mem_r's exact
   layout.  NOTE(review): LENGTH is expanded twice, so pass only
   side-effect-free arguments.  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int mem_len = LENGTH; \
            if (mem_len) \
            { \
              MEMS =  XNEWVEC (struct arm_mem_r, mem_len);  \
              memcpy(&MEMS->len, &RECORD_BUF[0], \
                     sizeof(struct arm_mem_r) * LENGTH); \
            } \
          } \
          while (0)

/* True when decode_insn has already attached register or memory
   records to ARM_RECORD (boolean expression).  */
#define INSN_RECORDED(ARM_RECORD) \
        (0 != (ARM_RECORD)->reg_rec_count || 0 != (ARM_RECORD)->mem_rec_count)
10685
/* ARM memory record structure.  Describes one memory region an
   instruction is about to clobber: ADDR is the start address and LEN
   the size in bytes.  NOTE(review): MEM_ALLOC memcpy's raw uint32_t
   pairs straight into arrays of this struct, so the member order
   <len, addr> and their uint32_t types must not change.  */
struct arm_mem_r
{
  uint32_t len;    /* Record length.  */
  uint32_t addr;   /* Memory address.  */
};
10692
/* ARM instruction record.  Holds the opcode of the current insn and
   the execution state (before entry to decode_insn()), and the lists
   of to-be-modified registers and memory blocks (filled in on return
   from decode_insn()).  */

typedef struct insn_decode_record_t
{
  struct gdbarch *gdbarch;
  struct regcache *regcache;
  CORE_ADDR this_addr;          /* Address of the insn being decoded.  */
  uint32_t arm_insn;            /* Raw instruction; wide enough for Thumb too.  */
  uint32_t cond;                /* Condition code field.  */
  uint32_t opcode;              /* Insn opcode field.  */
  uint32_t decode;              /* Insn decode bits.  */
  uint32_t mem_rec_count;       /* Number of memory records.  */
  uint32_t reg_rec_count;       /* Number of register records.  */
  uint32_t *arm_regs;           /* Registers to be saved for this record.  */
  struct arm_mem_r *arm_mems;   /* Memory regions to be saved for this record.  */
} insn_decode_record;
10712
10713
/* Check an ARM "should-be-one" / "should-be-zero" mandatory field.
   The field is LEN bits wide and starts at 1-based bit number BIT_NUM
   of INSN (i.e. BIT_NUM == 1 refers to bit 0).  SBO non-zero requires
   every bit of the field to be one; SBO == 0 requires every bit to be
   zero.  Returns 1 if the field is valid, 0 otherwise; an empty field
   (LEN == 0) is trivially valid.  */

static int
sbo_sbz (uint32_t insn, uint32_t bit_num, uint32_t len, uint32_t sbo)
{
  uint32_t mask, field;

  if (!len)
    return 1;

  /* Mask of LEN low-order ones; guard the shift, since 1u << 32 would
     be undefined behavior.  */
  mask = (len >= 32) ? 0xffffffffu : ((1u << len) - 1u);

  /* Extract the LEN-bit field that starts at bit (BIT_NUM - 1).  */
  field = (insn >> (bit_num - 1)) & mask;

  /* Compare the whole field at once: SBO means all ones, SBZ means all
     zeros.  (The previous bit-by-bit loop rejected every non-trivial
     SBZ field and accepted an all-zero SBO field.)  */
  return sbo ? field == mask : field == 0;
}
10737
/* Outcome of recording a single instruction.  */
enum arm_record_result
{
  ARM_RECORD_SUCCESS = 0,
  ARM_RECORD_FAILURE = 1
};

/* Which miscellaneous store insn arm_record_strx is handling.  */
typedef enum
{
  ARM_RECORD_STRH=1,
  ARM_RECORD_STRD
} arm_record_strx_t;

/* Encoding family of the instruction being recorded.  */
typedef enum
{
  ARM_RECORD=1,
  THUMB_RECORD,
  THUMB2_RECORD
} record_type_t;
10756
10757
10758 static int
10759 arm_record_strx (insn_decode_record *arm_insn_r, uint32_t *record_buf,
10760 uint32_t *record_buf_mem, arm_record_strx_t str_type)
10761 {
10762
10763 struct regcache *reg_cache = arm_insn_r->regcache;
10764 ULONGEST u_regval[2]= {0};
10765
10766 uint32_t reg_src1 = 0, reg_src2 = 0;
10767 uint32_t immed_high = 0, immed_low = 0,offset_8 = 0, tgt_mem_addr = 0;
10768 uint32_t opcode1 = 0;
10769
10770 arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24);
10771 arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7);
10772 opcode1 = bits (arm_insn_r->arm_insn, 20, 24);
10773
10774
10775 if (14 == arm_insn_r->opcode || 10 == arm_insn_r->opcode)
10776 {
10777 /* 1) Handle misc store, immediate offset. */
10778 immed_low = bits (arm_insn_r->arm_insn, 0, 3);
10779 immed_high = bits (arm_insn_r->arm_insn, 8, 11);
10780 reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
10781 regcache_raw_read_unsigned (reg_cache, reg_src1,
10782 &u_regval[0]);
10783 if (ARM_PC_REGNUM == reg_src1)
10784 {
10785 /* If R15 was used as Rn, hence current PC+8. */
10786 u_regval[0] = u_regval[0] + 8;
10787 }
10788 offset_8 = (immed_high << 4) | immed_low;
10789 /* Calculate target store address. */
10790 if (14 == arm_insn_r->opcode)
10791 {
10792 tgt_mem_addr = u_regval[0] + offset_8;
10793 }
10794 else
10795 {
10796 tgt_mem_addr = u_regval[0] - offset_8;
10797 }
10798 if (ARM_RECORD_STRH == str_type)
10799 {
10800 record_buf_mem[0] = 2;
10801 record_buf_mem[1] = tgt_mem_addr;
10802 arm_insn_r->mem_rec_count = 1;
10803 }
10804 else if (ARM_RECORD_STRD == str_type)
10805 {
10806 record_buf_mem[0] = 4;
10807 record_buf_mem[1] = tgt_mem_addr;
10808 record_buf_mem[2] = 4;
10809 record_buf_mem[3] = tgt_mem_addr + 4;
10810 arm_insn_r->mem_rec_count = 2;
10811 }
10812 }
10813 else if (12 == arm_insn_r->opcode || 8 == arm_insn_r->opcode)
10814 {
10815 /* 2) Store, register offset. */
10816 /* Get Rm. */
10817 reg_src1 = bits (arm_insn_r->arm_insn, 0, 3);
10818 /* Get Rn. */
10819 reg_src2 = bits (arm_insn_r->arm_insn, 16, 19);
10820 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
10821 regcache_raw_read_unsigned (reg_cache, reg_src2, &u_regval[1]);
10822 if (15 == reg_src2)
10823 {
10824 /* If R15 was used as Rn, hence current PC+8. */
10825 u_regval[0] = u_regval[0] + 8;
10826 }
10827 /* Calculate target store address, Rn +/- Rm, register offset. */
10828 if (12 == arm_insn_r->opcode)
10829 {
10830 tgt_mem_addr = u_regval[0] + u_regval[1];
10831 }
10832 else
10833 {
10834 tgt_mem_addr = u_regval[1] - u_regval[0];
10835 }
10836 if (ARM_RECORD_STRH == str_type)
10837 {
10838 record_buf_mem[0] = 2;
10839 record_buf_mem[1] = tgt_mem_addr;
10840 arm_insn_r->mem_rec_count = 1;
10841 }
10842 else if (ARM_RECORD_STRD == str_type)
10843 {
10844 record_buf_mem[0] = 4;
10845 record_buf_mem[1] = tgt_mem_addr;
10846 record_buf_mem[2] = 4;
10847 record_buf_mem[3] = tgt_mem_addr + 4;
10848 arm_insn_r->mem_rec_count = 2;
10849 }
10850 }
10851 else if (11 == arm_insn_r->opcode || 15 == arm_insn_r->opcode
10852 || 2 == arm_insn_r->opcode || 6 == arm_insn_r->opcode)
10853 {
10854 /* 3) Store, immediate pre-indexed. */
10855 /* 5) Store, immediate post-indexed. */
10856 immed_low = bits (arm_insn_r->arm_insn, 0, 3);
10857 immed_high = bits (arm_insn_r->arm_insn, 8, 11);
10858 offset_8 = (immed_high << 4) | immed_low;
10859 reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
10860 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
10861 /* Calculate target store address, Rn +/- Rm, register offset. */
10862 if (15 == arm_insn_r->opcode || 6 == arm_insn_r->opcode)
10863 {
10864 tgt_mem_addr = u_regval[0] + offset_8;
10865 }
10866 else
10867 {
10868 tgt_mem_addr = u_regval[0] - offset_8;
10869 }
10870 if (ARM_RECORD_STRH == str_type)
10871 {
10872 record_buf_mem[0] = 2;
10873 record_buf_mem[1] = tgt_mem_addr;
10874 arm_insn_r->mem_rec_count = 1;
10875 }
10876 else if (ARM_RECORD_STRD == str_type)
10877 {
10878 record_buf_mem[0] = 4;
10879 record_buf_mem[1] = tgt_mem_addr;
10880 record_buf_mem[2] = 4;
10881 record_buf_mem[3] = tgt_mem_addr + 4;
10882 arm_insn_r->mem_rec_count = 2;
10883 }
10884 /* Record Rn also as it changes. */
10885 *(record_buf) = bits (arm_insn_r->arm_insn, 16, 19);
10886 arm_insn_r->reg_rec_count = 1;
10887 }
10888 else if (9 == arm_insn_r->opcode || 13 == arm_insn_r->opcode
10889 || 0 == arm_insn_r->opcode || 4 == arm_insn_r->opcode)
10890 {
10891 /* 4) Store, register pre-indexed. */
10892 /* 6) Store, register post -indexed. */
10893 reg_src1 = bits (arm_insn_r->arm_insn, 0, 3);
10894 reg_src2 = bits (arm_insn_r->arm_insn, 16, 19);
10895 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
10896 regcache_raw_read_unsigned (reg_cache, reg_src2, &u_regval[1]);
10897 /* Calculate target store address, Rn +/- Rm, register offset. */
10898 if (13 == arm_insn_r->opcode || 4 == arm_insn_r->opcode)
10899 {
10900 tgt_mem_addr = u_regval[0] + u_regval[1];
10901 }
10902 else
10903 {
10904 tgt_mem_addr = u_regval[1] - u_regval[0];
10905 }
10906 if (ARM_RECORD_STRH == str_type)
10907 {
10908 record_buf_mem[0] = 2;
10909 record_buf_mem[1] = tgt_mem_addr;
10910 arm_insn_r->mem_rec_count = 1;
10911 }
10912 else if (ARM_RECORD_STRD == str_type)
10913 {
10914 record_buf_mem[0] = 4;
10915 record_buf_mem[1] = tgt_mem_addr;
10916 record_buf_mem[2] = 4;
10917 record_buf_mem[3] = tgt_mem_addr + 4;
10918 arm_insn_r->mem_rec_count = 2;
10919 }
10920 /* Record Rn also as it changes. */
10921 *(record_buf) = bits (arm_insn_r->arm_insn, 16, 19);
10922 arm_insn_r->reg_rec_count = 1;
10923 }
10924 return 0;
10925 }
10926
10927 /* Handling ARM extension space insns. */
10928
10929 static int
10930 arm_record_extension_space (insn_decode_record *arm_insn_r)
10931 {
10932 uint32_t ret = 0; /* Return value: -1:record failure ; 0:success */
10933 uint32_t opcode1 = 0, opcode2 = 0, insn_op1 = 0;
10934 uint32_t record_buf[8], record_buf_mem[8];
10935 uint32_t reg_src1 = 0;
10936 uint32_t immed_high = 0, immed_low = 0,offset_8 = 0, tgt_mem_addr = 0;
10937 struct regcache *reg_cache = arm_insn_r->regcache;
10938 ULONGEST u_regval = 0;
10939
10940 gdb_assert (!INSN_RECORDED(arm_insn_r));
10941 /* Handle unconditional insn extension space. */
10942
10943 opcode1 = bits (arm_insn_r->arm_insn, 20, 27);
10944 opcode2 = bits (arm_insn_r->arm_insn, 4, 7);
10945 if (arm_insn_r->cond)
10946 {
10947 /* PLD has no affect on architectural state, it just affects
10948 the caches. */
10949 if (5 == ((opcode1 & 0xE0) >> 5))
10950 {
10951 /* BLX(1) */
10952 record_buf[0] = ARM_PS_REGNUM;
10953 record_buf[1] = ARM_LR_REGNUM;
10954 arm_insn_r->reg_rec_count = 2;
10955 }
10956 /* STC2, LDC2, MCR2, MRC2, CDP2: <TBD>, co-processor insn. */
10957 }
10958
10959
10960 opcode1 = bits (arm_insn_r->arm_insn, 25, 27);
10961 if (3 == opcode1 && bit (arm_insn_r->arm_insn, 4))
10962 {
10963 ret = -1;
10964 /* Undefined instruction on ARM V5; need to handle if later
10965 versions define it. */
10966 }
10967
10968 opcode1 = bits (arm_insn_r->arm_insn, 24, 27);
10969 opcode2 = bits (arm_insn_r->arm_insn, 4, 7);
10970 insn_op1 = bits (arm_insn_r->arm_insn, 20, 23);
10971
10972 /* Handle arithmetic insn extension space. */
10973 if (!opcode1 && 9 == opcode2 && 1 != arm_insn_r->cond
10974 && !INSN_RECORDED(arm_insn_r))
10975 {
10976 /* Handle MLA(S) and MUL(S). */
10977 if (0 <= insn_op1 && 3 >= insn_op1)
10978 {
10979 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
10980 record_buf[1] = ARM_PS_REGNUM;
10981 arm_insn_r->reg_rec_count = 2;
10982 }
10983 else if (4 <= insn_op1 && 15 >= insn_op1)
10984 {
10985 /* Handle SMLAL(S), SMULL(S), UMLAL(S), UMULL(S). */
10986 record_buf[0] = bits (arm_insn_r->arm_insn, 16, 19);
10987 record_buf[1] = bits (arm_insn_r->arm_insn, 12, 15);
10988 record_buf[2] = ARM_PS_REGNUM;
10989 arm_insn_r->reg_rec_count = 3;
10990 }
10991 }
10992
10993 opcode1 = bits (arm_insn_r->arm_insn, 26, 27);
10994 opcode2 = bits (arm_insn_r->arm_insn, 23, 24);
10995 insn_op1 = bits (arm_insn_r->arm_insn, 21, 22);
10996
10997 /* Handle control insn extension space. */
10998
10999 if (!opcode1 && 2 == opcode2 && !bit (arm_insn_r->arm_insn, 20)
11000 && 1 != arm_insn_r->cond && !INSN_RECORDED(arm_insn_r))
11001 {
11002 if (!bit (arm_insn_r->arm_insn,25))
11003 {
11004 if (!bits (arm_insn_r->arm_insn, 4, 7))
11005 {
11006 if ((0 == insn_op1) || (2 == insn_op1))
11007 {
11008 /* MRS. */
11009 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11010 arm_insn_r->reg_rec_count = 1;
11011 }
11012 else if (1 == insn_op1)
11013 {
11014 /* CSPR is going to be changed. */
11015 record_buf[0] = ARM_PS_REGNUM;
11016 arm_insn_r->reg_rec_count = 1;
11017 }
11018 else if (3 == insn_op1)
11019 {
11020 /* SPSR is going to be changed. */
11021 /* We need to get SPSR value, which is yet to be done. */
11022 printf_unfiltered (_("Process record does not support "
11023 "instruction 0x%0x at address %s.\n"),
11024 arm_insn_r->arm_insn,
11025 paddress (arm_insn_r->gdbarch,
11026 arm_insn_r->this_addr));
11027 return -1;
11028 }
11029 }
11030 else if (1 == bits (arm_insn_r->arm_insn, 4, 7))
11031 {
11032 if (1 == insn_op1)
11033 {
11034 /* BX. */
11035 record_buf[0] = ARM_PS_REGNUM;
11036 arm_insn_r->reg_rec_count = 1;
11037 }
11038 else if (3 == insn_op1)
11039 {
11040 /* CLZ. */
11041 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11042 arm_insn_r->reg_rec_count = 1;
11043 }
11044 }
11045 else if (3 == bits (arm_insn_r->arm_insn, 4, 7))
11046 {
11047 /* BLX. */
11048 record_buf[0] = ARM_PS_REGNUM;
11049 record_buf[1] = ARM_LR_REGNUM;
11050 arm_insn_r->reg_rec_count = 2;
11051 }
11052 else if (5 == bits (arm_insn_r->arm_insn, 4, 7))
11053 {
11054 /* QADD, QSUB, QDADD, QDSUB */
11055 record_buf[0] = ARM_PS_REGNUM;
11056 record_buf[1] = bits (arm_insn_r->arm_insn, 12, 15);
11057 arm_insn_r->reg_rec_count = 2;
11058 }
11059 else if (7 == bits (arm_insn_r->arm_insn, 4, 7))
11060 {
11061 /* BKPT. */
11062 record_buf[0] = ARM_PS_REGNUM;
11063 record_buf[1] = ARM_LR_REGNUM;
11064 arm_insn_r->reg_rec_count = 2;
11065
11066 /* Save SPSR also;how? */
11067 printf_unfiltered (_("Process record does not support "
11068 "instruction 0x%0x at address %s.\n"),
11069 arm_insn_r->arm_insn,
11070 paddress (arm_insn_r->gdbarch, arm_insn_r->this_addr));
11071 return -1;
11072 }
11073 else if(8 == bits (arm_insn_r->arm_insn, 4, 7)
11074 || 10 == bits (arm_insn_r->arm_insn, 4, 7)
11075 || 12 == bits (arm_insn_r->arm_insn, 4, 7)
11076 || 14 == bits (arm_insn_r->arm_insn, 4, 7)
11077 )
11078 {
11079 if (0 == insn_op1 || 1 == insn_op1)
11080 {
11081 /* SMLA<x><y>, SMLAW<y>, SMULW<y>. */
11082 /* We dont do optimization for SMULW<y> where we
11083 need only Rd. */
11084 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11085 record_buf[1] = ARM_PS_REGNUM;
11086 arm_insn_r->reg_rec_count = 2;
11087 }
11088 else if (2 == insn_op1)
11089 {
11090 /* SMLAL<x><y>. */
11091 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11092 record_buf[1] = bits (arm_insn_r->arm_insn, 16, 19);
11093 arm_insn_r->reg_rec_count = 2;
11094 }
11095 else if (3 == insn_op1)
11096 {
11097 /* SMUL<x><y>. */
11098 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11099 arm_insn_r->reg_rec_count = 1;
11100 }
11101 }
11102 }
11103 else
11104 {
11105 /* MSR : immediate form. */
11106 if (1 == insn_op1)
11107 {
11108 /* CSPR is going to be changed. */
11109 record_buf[0] = ARM_PS_REGNUM;
11110 arm_insn_r->reg_rec_count = 1;
11111 }
11112 else if (3 == insn_op1)
11113 {
11114 /* SPSR is going to be changed. */
11115 /* we need to get SPSR value, which is yet to be done */
11116 printf_unfiltered (_("Process record does not support "
11117 "instruction 0x%0x at address %s.\n"),
11118 arm_insn_r->arm_insn,
11119 paddress (arm_insn_r->gdbarch,
11120 arm_insn_r->this_addr));
11121 return -1;
11122 }
11123 }
11124 }
11125
11126 opcode1 = bits (arm_insn_r->arm_insn, 25, 27);
11127 opcode2 = bits (arm_insn_r->arm_insn, 20, 24);
11128 insn_op1 = bits (arm_insn_r->arm_insn, 5, 6);
11129
11130 /* Handle load/store insn extension space. */
11131
11132 if (!opcode1 && bit (arm_insn_r->arm_insn, 7)
11133 && bit (arm_insn_r->arm_insn, 4) && 1 != arm_insn_r->cond
11134 && !INSN_RECORDED(arm_insn_r))
11135 {
11136 /* SWP/SWPB. */
11137 if (0 == insn_op1)
11138 {
11139 /* These insn, changes register and memory as well. */
11140 /* SWP or SWPB insn. */
11141 /* Get memory address given by Rn. */
11142 reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
11143 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval);
11144 /* SWP insn ?, swaps word. */
11145 if (8 == arm_insn_r->opcode)
11146 {
11147 record_buf_mem[0] = 4;
11148 }
11149 else
11150 {
11151 /* SWPB insn, swaps only byte. */
11152 record_buf_mem[0] = 1;
11153 }
11154 record_buf_mem[1] = u_regval;
11155 arm_insn_r->mem_rec_count = 1;
11156 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11157 arm_insn_r->reg_rec_count = 1;
11158 }
11159 else if (1 == insn_op1 && !bit (arm_insn_r->arm_insn, 20))
11160 {
11161 /* STRH. */
11162 arm_record_strx(arm_insn_r, &record_buf[0], &record_buf_mem[0],
11163 ARM_RECORD_STRH);
11164 }
11165 else if (2 == insn_op1 && !bit (arm_insn_r->arm_insn, 20))
11166 {
11167 /* LDRD. */
11168 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11169 record_buf[1] = record_buf[0] + 1;
11170 arm_insn_r->reg_rec_count = 2;
11171 }
11172 else if (3 == insn_op1 && !bit (arm_insn_r->arm_insn, 20))
11173 {
11174 /* STRD. */
11175 arm_record_strx(arm_insn_r, &record_buf[0], &record_buf_mem[0],
11176 ARM_RECORD_STRD);
11177 }
11178 else if (bit (arm_insn_r->arm_insn, 20) && insn_op1 <= 3)
11179 {
11180 /* LDRH, LDRSB, LDRSH. */
11181 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11182 arm_insn_r->reg_rec_count = 1;
11183 }
11184
11185 }
11186
11187 opcode1 = bits (arm_insn_r->arm_insn, 23, 27);
11188 if (24 == opcode1 && bit (arm_insn_r->arm_insn, 21)
11189 && !INSN_RECORDED(arm_insn_r))
11190 {
11191 ret = -1;
11192 /* Handle coprocessor insn extension space. */
11193 }
11194
11195 /* To be done for ARMv5 and later; as of now we return -1. */
11196 if (-1 == ret)
11197 printf_unfiltered (_("Process record does not support instruction x%0x "
11198 "at address %s.\n"),arm_insn_r->arm_insn,
11199 paddress (arm_insn_r->gdbarch, arm_insn_r->this_addr));
11200
11201
11202 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
11203 MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
11204
11205 return ret;
11206 }
11207
11208 /* Handling opcode 000 insns. */
11209
static int
arm_record_data_proc_misc_ld_str (insn_decode_record *arm_insn_r)
{
  struct regcache *reg_cache = arm_insn_r->regcache;
  uint32_t record_buf[8], record_buf_mem[8];
  ULONGEST u_regval[2] = {0};

  /* NOTE(review): reg_src1/reg_src2, immed_high/immed_low, offset_8,
     tgt_mem_addr and u_regval[1] are declared but several are never
     used on some paths; kept as-is.  */
  uint32_t reg_src1 = 0, reg_src2 = 0, reg_dest = 0;
  uint32_t immed_high = 0, immed_low = 0, offset_8 = 0, tgt_mem_addr = 0;
  uint32_t opcode1 = 0;

  /* Decode fields: opcode = insn bits 21-24, decode = insn bits 4-7,
     opcode1 = insn bits 20-24 (opcode plus the S/L bit).  */
  arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24);
  arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7);
  opcode1 = bits (arm_insn_r->arm_insn, 20, 24);

  /* Data processing insn /multiply insn.  */
  if (9 == arm_insn_r->decode
      && ((4 <= arm_insn_r->opcode && 7 >= arm_insn_r->opcode)
      || (0 == arm_insn_r->opcode || 1 == arm_insn_r->opcode)))
    {
      /* Handle multiply instructions.  */
      /* MLA, MUL, SMLAL, SMULL, UMLAL, UMULL.  */
      if (0 == arm_insn_r->opcode || 1 == arm_insn_r->opcode)
	{
	  /* Handle MLA and MUL: Rd (bits 16-19) and the flags change.  */
	  record_buf[0] = bits (arm_insn_r->arm_insn, 16, 19);
	  record_buf[1] = ARM_PS_REGNUM;
	  arm_insn_r->reg_rec_count = 2;
	}
      else if (4 <= arm_insn_r->opcode && 7 >= arm_insn_r->opcode)
	{
	  /* Handle SMLAL, SMULL, UMLAL, UMULL: the 64-bit result lives
	     in RdHi (bits 16-19) and RdLo (bits 12-15); the flags may
	     change as well.  */
	  record_buf[0] = bits (arm_insn_r->arm_insn, 16, 19);
	  record_buf[1] = bits (arm_insn_r->arm_insn, 12, 15);
	  record_buf[2] = ARM_PS_REGNUM;
	  arm_insn_r->reg_rec_count = 3;
	}
    }
  else if (bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM)
	   && (11 == arm_insn_r->decode || 13 == arm_insn_r->decode))
    {
      /* Handle misc load insns, as 20th bit (L = 1).  */
      /* LDR insn has a capability to do branching, if
	 MOV LR, PC is preceded by LDR insn having Rn as R15
	 in that case, it emulates branch and link insn, and hence we
	 need to save CPSR and PC as well.  I am not sure this is right
	 place; as opcode = 010 LDR insn make this happen, if R15 was
	 used.  */
      reg_dest = bits (arm_insn_r->arm_insn, 12, 15);
      if (15 != reg_dest)
	{
	  /* Ordinary load: only the destination register changes.  */
	  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
	  arm_insn_r->reg_rec_count = 1;
	}
      else
	{
	  /* Load into the PC acts as a branch; record CPSR too.  */
	  record_buf[0] = reg_dest;
	  record_buf[1] = ARM_PS_REGNUM;
	  arm_insn_r->reg_rec_count = 2;
	}
    }
  else if ((9 == arm_insn_r->opcode || 11 == arm_insn_r->opcode)
	   && sbo_sbz (arm_insn_r->arm_insn, 5, 12, 0)
	   && sbo_sbz (arm_insn_r->arm_insn, 13, 4, 1)
	   && 2 == bits (arm_insn_r->arm_insn, 20, 21))
    {
      /* Handle MSR insn.  */
      if (9 == arm_insn_r->opcode)
	{
	  /* CPSR is going to be changed.  */
	  record_buf[0] = ARM_PS_REGNUM;
	  arm_insn_r->reg_rec_count = 1;
	}
      else
	{
	  /* SPSR is going to be changed.  */
	  /* How to read SPSR value?  Not supported yet.  */
	  printf_unfiltered (_("Process record does not support instruction "
			       "0x%0x at address %s.\n"),
			     arm_insn_r->arm_insn,
			     paddress (arm_insn_r->gdbarch, arm_insn_r->this_addr));
	  return -1;
	}
    }
  else if (9 == arm_insn_r->decode
	   && (8 == arm_insn_r->opcode || 10 == arm_insn_r->opcode)
	   && !bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM))
    {
      /* Handling SWP, SWPB.  */
      /* These insn, changes register and memory as well.  */
      /* SWP or SWPB insn.  */

      /* Rn (bits 16-19) holds the address of the swapped location.  */
      reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
      regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
      /* SWP insn ?, swaps word.  */
      if (8 == arm_insn_r->opcode)
	{
	  record_buf_mem[0] = 4;
	}
      else
	{
	  /* SWPB insn, swaps only byte.  */
	  record_buf_mem[0] = 1;
	}
      /* record_buf_mem is (length, address) pairs.  */
      record_buf_mem[1] = u_regval[0];
      arm_insn_r->mem_rec_count = 1;
      record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
      arm_insn_r->reg_rec_count = 1;
    }
  else if (3 == arm_insn_r->decode && 0x12 == opcode1
	   && sbo_sbz (arm_insn_r->arm_insn, 9, 12, 1))
    {
      /* Handle BLX, branch and link/exchange.  */
      if (9 == arm_insn_r->opcode)
	{
	  /* Branch is chosen by setting T bit of CPSR, bit[0] of Rm,
	     and R14 stores the return address.  */
	  record_buf[0] = ARM_PS_REGNUM;
	  record_buf[1] = ARM_LR_REGNUM;
	  arm_insn_r->reg_rec_count = 2;
	}
    }
  else if (7 == arm_insn_r->decode && 0x12 == opcode1)
    {
      /* Handle enhanced software breakpoint insn, BKPT.  */
      /* CPSR is changed to be executed in ARM state, disabling normal
	 interrupts, entering abort mode.  */
      /* According to high vector configuration PC is set.  */
      /* user hit breakpoint and type reverse, in
	 that case, we need to go back with previous CPSR and
	 Program Counter.  */
      record_buf[0] = ARM_PS_REGNUM;
      record_buf[1] = ARM_LR_REGNUM;
      arm_insn_r->reg_rec_count = 2;

      /* Save SPSR also; how?  Not supported yet, so fail.  */
      printf_unfiltered (_("Process record does not support instruction "
			   "0x%0x at address %s.\n"),arm_insn_r->arm_insn,
			 paddress (arm_insn_r->gdbarch,
				   arm_insn_r->this_addr));
      return -1;
    }
  else if (11 == arm_insn_r->decode
	   && !bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM))
    {
      /* Handle enhanced store insns and DSP insns (e.g. LDRD).  */

      /* Handle str(x) insn */
      arm_record_strx(arm_insn_r, &record_buf[0], &record_buf_mem[0],
		      ARM_RECORD_STRH);
    }
  else if (1 == arm_insn_r->decode && 0x12 == opcode1
	   && sbo_sbz (arm_insn_r->arm_insn, 9, 12, 1))
    {
      /* Handle BX, branch and link/exchange.  */
      /* Branch is chosen by setting T bit of CPSR, bit[0] of Rm.  */
      record_buf[0] = ARM_PS_REGNUM;
      arm_insn_r->reg_rec_count = 1;
    }
  else if (1 == arm_insn_r->decode && 0x16 == opcode1
	   && sbo_sbz (arm_insn_r->arm_insn, 9, 4, 1)
	   && sbo_sbz (arm_insn_r->arm_insn, 17, 4, 1))
    {
      /* Count leading zeros: CLZ.  Only Rd (bits 12-15) changes.  */
      record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
      arm_insn_r->reg_rec_count = 1;
    }
  else if (!bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM)
	   && (8 == arm_insn_r->opcode || 10 == arm_insn_r->opcode)
	   && sbo_sbz (arm_insn_r->arm_insn, 17, 4, 1)
	   && sbo_sbz (arm_insn_r->arm_insn, 1, 12, 0)
	   )
    {
      /* Handle MRS insn: Rd (bits 12-15) receives CPSR/SPSR.  */
      record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
      arm_insn_r->reg_rec_count = 1;
    }
  else if (arm_insn_r->opcode <= 15)
    {
      /* Normal data processing insns.  */
      /* Out of 11 shifter operands mode, all the insn modifies destination
	 register, which is specified by 13-16 decode.  */
      record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
      record_buf[1] = ARM_PS_REGNUM;
      arm_insn_r->reg_rec_count = 2;
    }
  else
    {
      /* NOTE(review): opcode is a 4-bit field (max 15), so the branch
	 above always matches and this path is unreachable.  */
      return -1;
    }

  REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
  MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
  return 0;
}
11405
11406 /* Handling opcode 001 insns. */
11407
11408 static int
11409 arm_record_data_proc_imm (insn_decode_record *arm_insn_r)
11410 {
11411 uint32_t record_buf[8], record_buf_mem[8];
11412
11413 arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24);
11414 arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7);
11415
11416 if ((9 == arm_insn_r->opcode || 11 == arm_insn_r->opcode)
11417 && 2 == bits (arm_insn_r->arm_insn, 20, 21)
11418 && sbo_sbz (arm_insn_r->arm_insn, 13, 4, 1)
11419 )
11420 {
11421 /* Handle MSR insn. */
11422 if (9 == arm_insn_r->opcode)
11423 {
11424 /* CSPR is going to be changed. */
11425 record_buf[0] = ARM_PS_REGNUM;
11426 arm_insn_r->reg_rec_count = 1;
11427 }
11428 else
11429 {
11430 /* SPSR is going to be changed. */
11431 }
11432 }
11433 else if (arm_insn_r->opcode <= 15)
11434 {
11435 /* Normal data processing insns. */
11436 /* Out of 11 shifter operands mode, all the insn modifies destination
11437 register, which is specified by 13-16 decode. */
11438 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11439 record_buf[1] = ARM_PS_REGNUM;
11440 arm_insn_r->reg_rec_count = 2;
11441 }
11442 else
11443 {
11444 return -1;
11445 }
11446
11447 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
11448 MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
11449 return 0;
11450 }
11451
11452 /* Handling opcode 010 insns. */
11453
static int
arm_record_ld_st_imm_offset (insn_decode_record *arm_insn_r)
{
  struct regcache *reg_cache = arm_insn_r->regcache;

  uint32_t reg_src1 = 0 , reg_dest = 0;
  uint32_t offset_12 = 0, tgt_mem_addr = 0;
  uint32_t record_buf[8], record_buf_mem[8];

  ULONGEST u_regval = 0;

  /* opcode = bits 21-24 (P, U, B, W for this insn class);
     decode = bits 4-7.  */
  arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24);
  arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7);

  /* L bit set: load.  */
  if (bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM))
    {
      reg_dest = bits (arm_insn_r->arm_insn, 12, 15);
      /* LDR insn has a capability to do branching, if
	 MOV LR, PC is preceded by LDR insn having Rn as R15
	 in that case, it emulates branch and link insn, and hence we
	 need to save CPSR and PC as well.  */
      if (ARM_PC_REGNUM != reg_dest)
	{
	  /* Ordinary load: only the destination register changes.  */
	  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
	  arm_insn_r->reg_rec_count = 1;
	}
      else
	{
	  /* Load into PC acts as a branch; record CPSR as well.  */
	  record_buf[0] = reg_dest;
	  record_buf[1] = ARM_PS_REGNUM;
	  arm_insn_r->reg_rec_count = 2;
	}
    }
  else
    {
      /* Store, immediate offset, immediate pre-indexed,
	 immediate post-indexed.  */
      reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
      offset_12 = bits (arm_insn_r->arm_insn, 0, 11);
      regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval);
      /* U == 1: offset is added to the base, else subtracted.  */
      if (bit (arm_insn_r->arm_insn, 23))
	{
	  tgt_mem_addr = u_regval + offset_12;
	}
      else
	{
	  tgt_mem_addr = u_regval - offset_12;
	}

      /* The B bit (bit 1 of opcode) selects byte vs word access.  */
      switch (arm_insn_r->opcode)
	{
	  /* STR.  */
	  case 8:
	  case 12:
	  /* STR.  */
	  case 9:
	  case 13:
	  /* STRT.  */
	  case 1:
	  case 5:
	  /* STR.  */
	  case 4:
	  case 0:
	    record_buf_mem[0] = 4;
	    break;

	  /* STRB.  */
	  case 10:
	  case 14:
	  /* STRB.  */
	  case 11:
	  case 15:
	  /* STRBT.  */
	  case 3:
	  case 7:
	  /* STRB.  */
	  case 2:
	  case 6:
	    record_buf_mem[0] = 1;
	    break;

	  default:
	    gdb_assert_not_reached ("no decoding pattern found");
	    break;
	}
      /* record_buf_mem is (length, address) pairs.  */
      record_buf_mem[1] = tgt_mem_addr;
      arm_insn_r->mem_rec_count = 1;

      /* Pre-indexed (W == 1) and post-indexed (P == 0) forms write the
	 updated address back into Rn; record Rn for those opcodes.  */
      if (9 == arm_insn_r->opcode || 11 == arm_insn_r->opcode
	  || 13 == arm_insn_r->opcode || 15 == arm_insn_r->opcode
	  || 0 == arm_insn_r->opcode || 2 == arm_insn_r->opcode
	  || 4 == arm_insn_r->opcode || 6 == arm_insn_r->opcode
	  || 1 == arm_insn_r->opcode || 3 == arm_insn_r->opcode
	  || 5 == arm_insn_r->opcode || 7 == arm_insn_r->opcode
	  )
	{
	  /* We are handling pre-indexed mode; post-indexed mode;
	     where Rn is going to be changed.  */
	  record_buf[0] = reg_src1;
	  arm_insn_r->reg_rec_count = 1;
	}
    }

  REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
  MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
  return 0;
}
11562
11563 /* Handling opcode 011 insns. */
11564
11565 static int
11566 arm_record_ld_st_reg_offset (insn_decode_record *arm_insn_r)
11567 {
11568 struct regcache *reg_cache = arm_insn_r->regcache;
11569
11570 uint32_t shift_imm = 0;
11571 uint32_t reg_src1 = 0, reg_src2 = 0, reg_dest = 0;
11572 uint32_t offset_12 = 0, tgt_mem_addr = 0;
11573 uint32_t record_buf[8], record_buf_mem[8];
11574
11575 LONGEST s_word;
11576 ULONGEST u_regval[2];
11577
11578 arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24);
11579 arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7);
11580
11581 /* Handle enhanced store insns and LDRD DSP insn,
11582 order begins according to addressing modes for store insns
11583 STRH insn. */
11584
11585 /* LDR or STR? */
11586 if (bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM))
11587 {
11588 reg_dest = bits (arm_insn_r->arm_insn, 12, 15);
11589 /* LDR insn has a capability to do branching, if
11590 MOV LR, PC is precedded by LDR insn having Rn as R15
11591 in that case, it emulates branch and link insn, and hence we
11592 need to save CSPR and PC as well. */
11593 if (15 != reg_dest)
11594 {
11595 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11596 arm_insn_r->reg_rec_count = 1;
11597 }
11598 else
11599 {
11600 record_buf[0] = reg_dest;
11601 record_buf[1] = ARM_PS_REGNUM;
11602 arm_insn_r->reg_rec_count = 2;
11603 }
11604 }
11605 else
11606 {
11607 if (! bits (arm_insn_r->arm_insn, 4, 11))
11608 {
11609 /* Store insn, register offset and register pre-indexed,
11610 register post-indexed. */
11611 /* Get Rm. */
11612 reg_src1 = bits (arm_insn_r->arm_insn, 0, 3);
11613 /* Get Rn. */
11614 reg_src2 = bits (arm_insn_r->arm_insn, 16, 19);
11615 regcache_raw_read_unsigned (reg_cache, reg_src1
11616 , &u_regval[0]);
11617 regcache_raw_read_unsigned (reg_cache, reg_src2
11618 , &u_regval[1]);
11619 if (15 == reg_src2)
11620 {
11621 /* If R15 was used as Rn, hence current PC+8. */
11622 /* Pre-indexed mode doesnt reach here ; illegal insn. */
11623 u_regval[0] = u_regval[0] + 8;
11624 }
11625 /* Calculate target store address, Rn +/- Rm, register offset. */
11626 /* U == 1. */
11627 if (bit (arm_insn_r->arm_insn, 23))
11628 {
11629 tgt_mem_addr = u_regval[0] + u_regval[1];
11630 }
11631 else
11632 {
11633 tgt_mem_addr = u_regval[1] - u_regval[0];
11634 }
11635
11636 switch (arm_insn_r->opcode)
11637 {
11638 /* STR. */
11639 case 8:
11640 case 12:
11641 /* STR. */
11642 case 9:
11643 case 13:
11644 /* STRT. */
11645 case 1:
11646 case 5:
11647 /* STR. */
11648 case 0:
11649 case 4:
11650 record_buf_mem[0] = 4;
11651 break;
11652
11653 /* STRB. */
11654 case 10:
11655 case 14:
11656 /* STRB. */
11657 case 11:
11658 case 15:
11659 /* STRBT. */
11660 case 3:
11661 case 7:
11662 /* STRB. */
11663 case 2:
11664 case 6:
11665 record_buf_mem[0] = 1;
11666 break;
11667
11668 default:
11669 gdb_assert_not_reached ("no decoding pattern found");
11670 break;
11671 }
11672 record_buf_mem[1] = tgt_mem_addr;
11673 arm_insn_r->mem_rec_count = 1;
11674
11675 if (9 == arm_insn_r->opcode || 11 == arm_insn_r->opcode
11676 || 13 == arm_insn_r->opcode || 15 == arm_insn_r->opcode
11677 || 0 == arm_insn_r->opcode || 2 == arm_insn_r->opcode
11678 || 4 == arm_insn_r->opcode || 6 == arm_insn_r->opcode
11679 || 1 == arm_insn_r->opcode || 3 == arm_insn_r->opcode
11680 || 5 == arm_insn_r->opcode || 7 == arm_insn_r->opcode
11681 )
11682 {
11683 /* Rn is going to be changed in pre-indexed mode and
11684 post-indexed mode as well. */
11685 record_buf[0] = reg_src2;
11686 arm_insn_r->reg_rec_count = 1;
11687 }
11688 }
11689 else
11690 {
11691 /* Store insn, scaled register offset; scaled pre-indexed. */
11692 offset_12 = bits (arm_insn_r->arm_insn, 5, 6);
11693 /* Get Rm. */
11694 reg_src1 = bits (arm_insn_r->arm_insn, 0, 3);
11695 /* Get Rn. */
11696 reg_src2 = bits (arm_insn_r->arm_insn, 16, 19);
11697 /* Get shift_imm. */
11698 shift_imm = bits (arm_insn_r->arm_insn, 7, 11);
11699 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
11700 regcache_raw_read_signed (reg_cache, reg_src1, &s_word);
11701 regcache_raw_read_unsigned (reg_cache, reg_src2, &u_regval[1]);
11702 /* Offset_12 used as shift. */
11703 switch (offset_12)
11704 {
11705 case 0:
11706 /* Offset_12 used as index. */
11707 offset_12 = u_regval[0] << shift_imm;
11708 break;
11709
11710 case 1:
11711 offset_12 = (!shift_imm)?0:u_regval[0] >> shift_imm;
11712 break;
11713
11714 case 2:
11715 if (!shift_imm)
11716 {
11717 if (bit (u_regval[0], 31))
11718 {
11719 offset_12 = 0xFFFFFFFF;
11720 }
11721 else
11722 {
11723 offset_12 = 0;
11724 }
11725 }
11726 else
11727 {
11728 /* This is arithmetic shift. */
11729 offset_12 = s_word >> shift_imm;
11730 }
11731 break;
11732
11733 case 3:
11734 if (!shift_imm)
11735 {
11736 regcache_raw_read_unsigned (reg_cache, ARM_PS_REGNUM,
11737 &u_regval[1]);
11738 /* Get C flag value and shift it by 31. */
11739 offset_12 = (((bit (u_regval[1], 29)) << 31) \
11740 | (u_regval[0]) >> 1);
11741 }
11742 else
11743 {
11744 offset_12 = (u_regval[0] >> shift_imm) \
11745 | (u_regval[0] <<
11746 (sizeof(uint32_t) - shift_imm));
11747 }
11748 break;
11749
11750 default:
11751 gdb_assert_not_reached ("no decoding pattern found");
11752 break;
11753 }
11754
11755 regcache_raw_read_unsigned (reg_cache, reg_src2, &u_regval[1]);
11756 /* bit U set. */
11757 if (bit (arm_insn_r->arm_insn, 23))
11758 {
11759 tgt_mem_addr = u_regval[1] + offset_12;
11760 }
11761 else
11762 {
11763 tgt_mem_addr = u_regval[1] - offset_12;
11764 }
11765
11766 switch (arm_insn_r->opcode)
11767 {
11768 /* STR. */
11769 case 8:
11770 case 12:
11771 /* STR. */
11772 case 9:
11773 case 13:
11774 /* STRT. */
11775 case 1:
11776 case 5:
11777 /* STR. */
11778 case 0:
11779 case 4:
11780 record_buf_mem[0] = 4;
11781 break;
11782
11783 /* STRB. */
11784 case 10:
11785 case 14:
11786 /* STRB. */
11787 case 11:
11788 case 15:
11789 /* STRBT. */
11790 case 3:
11791 case 7:
11792 /* STRB. */
11793 case 2:
11794 case 6:
11795 record_buf_mem[0] = 1;
11796 break;
11797
11798 default:
11799 gdb_assert_not_reached ("no decoding pattern found");
11800 break;
11801 }
11802 record_buf_mem[1] = tgt_mem_addr;
11803 arm_insn_r->mem_rec_count = 1;
11804
11805 if (9 == arm_insn_r->opcode || 11 == arm_insn_r->opcode
11806 || 13 == arm_insn_r->opcode || 15 == arm_insn_r->opcode
11807 || 0 == arm_insn_r->opcode || 2 == arm_insn_r->opcode
11808 || 4 == arm_insn_r->opcode || 6 == arm_insn_r->opcode
11809 || 1 == arm_insn_r->opcode || 3 == arm_insn_r->opcode
11810 || 5 == arm_insn_r->opcode || 7 == arm_insn_r->opcode
11811 )
11812 {
11813 /* Rn is going to be changed in register scaled pre-indexed
11814 mode,and scaled post indexed mode. */
11815 record_buf[0] = reg_src2;
11816 arm_insn_r->reg_rec_count = 1;
11817 }
11818 }
11819 }
11820
11821 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
11822 MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
11823 return 0;
11824 }
11825
11826 /* Handling opcode 100 insns. */
11827
static int
arm_record_ld_st_multiple (insn_decode_record *arm_insn_r)
{
  struct regcache *reg_cache = arm_insn_r->regcache;

  /* NOTE(review): register_list[] is never used.  */
  uint32_t register_list[16] = {0}, register_count = 0, register_bits = 0;
  uint32_t reg_src1 = 0, addr_mode = 0, no_of_regs = 0;
  uint32_t start_address = 0, index = 0;
  /* Up to 16 registers + base + CPSR, and 16 (length, address) pairs.  */
  uint32_t record_buf[24], record_buf_mem[48];

  ULONGEST u_regval[2] = {0};

  /* This mode is exclusively for load and store multiple.  */
  /* Handle increment after/before and decrement after/before modes;
     Rn is changing depending on W bit, but as of now we store Rn too
     without optimization.  */

  if (bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM))
    {
      /* LDM (1,2,3) where LDM (3) changes CPSR too.  */

      /* Bit 22 distinguishes the user-bank forms: when clear, the PC
	 (bit 15) may be in the list too.  */
      if (bit (arm_insn_r->arm_insn, 20) && !bit (arm_insn_r->arm_insn, 22))
	{
	  register_bits = bits (arm_insn_r->arm_insn, 0, 15);
	  no_of_regs = 15;
	}
      else
	{
	  register_bits = bits (arm_insn_r->arm_insn, 0, 14);
	  no_of_regs = 14;
	}
      /* Get Rn.  */
      reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
      /* Record every register whose bit is set in the list.  */
      while (register_bits)
	{
	  if (register_bits & 0x00000001)
	    record_buf[index++] = register_count;
	  register_bits = register_bits >> 1;
	  register_count++;
	}

      /* Extra space for Base Register and CPSR; without optimization.  */
      record_buf[index++] = reg_src1;
      record_buf[index++] = ARM_PS_REGNUM;
      arm_insn_r->reg_rec_count = index;
    }
  else
    {
      /* It handles both STM(1) and STM(2).  */
      /* addr_mode = P:U (bits 24:23) selects before/after and up/down.  */
      addr_mode = bits (arm_insn_r->arm_insn, 23, 24);

      register_bits = bits (arm_insn_r->arm_insn, 0, 15);
      /* Get Rn.  */
      reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
      regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
      /* Count the registers to be stored.  */
      while (register_bits)
	{
	  if (register_bits & 0x00000001)
	    register_count++;
	  register_bits = register_bits >> 1;
	}

      /* For each mode compute the lowest address written, then fill
	 record_buf_mem backwards with (length, address) pairs, one
	 word per register.  */
      switch (addr_mode)
	{
	  /* Decrement after.  */
	  case 0:
	  start_address = (u_regval[0]) - (register_count * 4) + 4;
	  arm_insn_r->mem_rec_count = register_count;
	  while (register_count)
	    {
	      record_buf_mem[(register_count * 2) - 1] = start_address;
	      record_buf_mem[(register_count * 2) - 2] = 4;
	      start_address = start_address + 4;
	      register_count--;
	    }
	  break;

	  /* Increment after.  */
	  case 1:
	  start_address = u_regval[0];
	  arm_insn_r->mem_rec_count = register_count;
	  while (register_count)
	    {
	      record_buf_mem[(register_count * 2) - 1] = start_address;
	      record_buf_mem[(register_count * 2) - 2] = 4;
	      start_address = start_address + 4;
	      register_count--;
	    }
	  break;

	  /* Decrement before.  */
	  case 2:

	  start_address = (u_regval[0]) - (register_count * 4);
	  arm_insn_r->mem_rec_count = register_count;
	  while (register_count)
	    {
	      record_buf_mem[(register_count * 2) - 1] = start_address;
	      record_buf_mem[(register_count * 2) - 2] = 4;
	      start_address = start_address + 4;
	      register_count--;
	    }
	  break;

	  /* Increment before.  */
	  case 3:
	  start_address = u_regval[0] + 4;
	  arm_insn_r->mem_rec_count = register_count;
	  while (register_count)
	    {
	      record_buf_mem[(register_count * 2) - 1] = start_address;
	      record_buf_mem[(register_count * 2) - 2] = 4;
	      start_address = start_address + 4;
	      register_count--;
	    }
	  break;

	  default:
	  gdb_assert_not_reached ("no decoding pattern found");
	  break;
	}

      /* Base register also changes; based on condition and W bit.  */
      /* We save it anyway without optimization.  */
      record_buf[0] = reg_src1;
      arm_insn_r->reg_rec_count = 1;
    }

  REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
  MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
  return 0;
}
11960
11961 /* Handling opcode 101 insns. */
11962
11963 static int
11964 arm_record_b_bl (insn_decode_record *arm_insn_r)
11965 {
11966 uint32_t record_buf[8];
11967
11968 /* Handle B, BL, BLX(1) insns. */
11969 /* B simply branches so we do nothing here. */
11970 /* Note: BLX(1) doesnt fall here but instead it falls into
11971 extension space. */
11972 if (bit (arm_insn_r->arm_insn, 24))
11973 {
11974 record_buf[0] = ARM_LR_REGNUM;
11975 arm_insn_r->reg_rec_count = 1;
11976 }
11977
11978 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
11979
11980 return 0;
11981 }
11982
11983 /* Handling opcode 110 insns. */
11984
11985 static int
11986 arm_record_unsupported_insn (insn_decode_record *arm_insn_r)
11987 {
11988 printf_unfiltered (_("Process record does not support instruction "
11989 "0x%0x at address %s.\n"),arm_insn_r->arm_insn,
11990 paddress (arm_insn_r->gdbarch, arm_insn_r->this_addr));
11991
11992 return -1;
11993 }
11994
11995 /* Handling opcode 111 insns. */
11996
11997 static int
11998 arm_record_coproc_data_proc (insn_decode_record *arm_insn_r)
11999 {
12000 struct gdbarch_tdep *tdep = gdbarch_tdep (arm_insn_r->gdbarch);
12001 struct regcache *reg_cache = arm_insn_r->regcache;
12002 uint32_t ret = 0; /* function return value: -1:record failure ; 0:success */
12003 ULONGEST u_regval = 0;
12004
12005 arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 24, 27);
12006
12007 /* Handle arm SWI/SVC system call instructions. */
12008 if (15 == arm_insn_r->opcode)
12009 {
12010 if (tdep->arm_syscall_record != NULL)
12011 {
12012 ULONGEST svc_operand, svc_number;
12013
12014 svc_operand = (0x00ffffff & arm_insn_r->arm_insn);
12015
12016 if (svc_operand) /* OABI. */
12017 svc_number = svc_operand - 0x900000;
12018 else /* EABI. */
12019 regcache_raw_read_unsigned (reg_cache, 7, &svc_number);
12020
12021 ret = tdep->arm_syscall_record (reg_cache, svc_number);
12022 }
12023 else
12024 {
12025 printf_unfiltered (_("no syscall record support\n"));
12026 ret = -1;
12027 }
12028 }
12029 else
12030 {
12031 arm_record_unsupported_insn (arm_insn_r);
12032 ret = -1;
12033 }
12034
12035 return ret;
12036 }
12037
12038 /* Handling opcode 000 insns. */
12039
12040 static int
12041 thumb_record_shift_add_sub (insn_decode_record *thumb_insn_r)
12042 {
12043 uint32_t record_buf[8];
12044 uint32_t reg_src1 = 0;
12045
12046 reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
12047
12048 record_buf[0] = ARM_PS_REGNUM;
12049 record_buf[1] = reg_src1;
12050 thumb_insn_r->reg_rec_count = 2;
12051
12052 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
12053
12054 return 0;
12055 }
12056
12057
12058 /* Handling opcode 001 insns. */
12059
12060 static int
12061 thumb_record_add_sub_cmp_mov (insn_decode_record *thumb_insn_r)
12062 {
12063 uint32_t record_buf[8];
12064 uint32_t reg_src1 = 0;
12065
12066 reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
12067
12068 record_buf[0] = ARM_PS_REGNUM;
12069 record_buf[1] = reg_src1;
12070 thumb_insn_r->reg_rec_count = 2;
12071
12072 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
12073
12074 return 0;
12075 }
12076
12077 /* Handling opcode 010 insns. */
12078
12079 static int
12080 thumb_record_ld_st_reg_offset (insn_decode_record *thumb_insn_r)
12081 {
12082 struct regcache *reg_cache = thumb_insn_r->regcache;
12083 uint32_t record_buf[8], record_buf_mem[8];
12084
12085 uint32_t reg_src1 = 0, reg_src2 = 0;
12086 uint32_t opcode1 = 0, opcode2 = 0, opcode3 = 0;
12087
12088 ULONGEST u_regval[2] = {0};
12089
12090 opcode1 = bits (thumb_insn_r->arm_insn, 10, 12);
12091
12092 if (bit (thumb_insn_r->arm_insn, 12))
12093 {
12094 /* Handle load/store register offset. */
12095 opcode2 = bits (thumb_insn_r->arm_insn, 9, 10);
12096 if (opcode2 >= 12 && opcode2 <= 15)
12097 {
12098 /* LDR(2), LDRB(2) , LDRH(2), LDRSB, LDRSH. */
12099 reg_src1 = bits (thumb_insn_r->arm_insn,0, 2);
12100 record_buf[0] = reg_src1;
12101 thumb_insn_r->reg_rec_count = 1;
12102 }
12103 else if (opcode2 >= 8 && opcode2 <= 10)
12104 {
12105 /* STR(2), STRB(2), STRH(2) . */
12106 reg_src1 = bits (thumb_insn_r->arm_insn, 3, 5);
12107 reg_src2 = bits (thumb_insn_r->arm_insn, 6, 8);
12108 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
12109 regcache_raw_read_unsigned (reg_cache, reg_src2, &u_regval[1]);
12110 if (8 == opcode2)
12111 record_buf_mem[0] = 4; /* STR (2). */
12112 else if (10 == opcode2)
12113 record_buf_mem[0] = 1; /* STRB (2). */
12114 else if (9 == opcode2)
12115 record_buf_mem[0] = 2; /* STRH (2). */
12116 record_buf_mem[1] = u_regval[0] + u_regval[1];
12117 thumb_insn_r->mem_rec_count = 1;
12118 }
12119 }
12120 else if (bit (thumb_insn_r->arm_insn, 11))
12121 {
12122 /* Handle load from literal pool. */
12123 /* LDR(3). */
12124 reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
12125 record_buf[0] = reg_src1;
12126 thumb_insn_r->reg_rec_count = 1;
12127 }
12128 else if (opcode1)
12129 {
12130 opcode2 = bits (thumb_insn_r->arm_insn, 8, 9);
12131 opcode3 = bits (thumb_insn_r->arm_insn, 0, 2);
12132 if ((3 == opcode2) && (!opcode3))
12133 {
12134 /* Branch with exchange. */
12135 record_buf[0] = ARM_PS_REGNUM;
12136 thumb_insn_r->reg_rec_count = 1;
12137 }
12138 else
12139 {
12140 /* Format 8; special data processing insns. */
12141 reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
12142 record_buf[0] = ARM_PS_REGNUM;
12143 record_buf[1] = reg_src1;
12144 thumb_insn_r->reg_rec_count = 2;
12145 }
12146 }
12147 else
12148 {
12149 /* Format 5; data processing insns. */
12150 reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
12151 if (bit (thumb_insn_r->arm_insn, 7))
12152 {
12153 reg_src1 = reg_src1 + 8;
12154 }
12155 record_buf[0] = ARM_PS_REGNUM;
12156 record_buf[1] = reg_src1;
12157 thumb_insn_r->reg_rec_count = 2;
12158 }
12159
12160 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
12161 MEM_ALLOC (thumb_insn_r->arm_mems, thumb_insn_r->mem_rec_count,
12162 record_buf_mem);
12163
12164 return 0;
12165 }
12166
/* Handling opcode 011 insns.  */
12168
12169 static int
12170 thumb_record_ld_st_imm_offset (insn_decode_record *thumb_insn_r)
12171 {
12172 struct regcache *reg_cache = thumb_insn_r->regcache;
12173 uint32_t record_buf[8], record_buf_mem[8];
12174
12175 uint32_t reg_src1 = 0;
12176 uint32_t opcode = 0, immed_5 = 0;
12177
12178 ULONGEST u_regval = 0;
12179
12180 opcode = bits (thumb_insn_r->arm_insn, 11, 12);
12181
12182 if (opcode)
12183 {
12184 /* LDR(1). */
12185 reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
12186 record_buf[0] = reg_src1;
12187 thumb_insn_r->reg_rec_count = 1;
12188 }
12189 else
12190 {
12191 /* STR(1). */
12192 reg_src1 = bits (thumb_insn_r->arm_insn, 3, 5);
12193 immed_5 = bits (thumb_insn_r->arm_insn, 6, 10);
12194 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval);
12195 record_buf_mem[0] = 4;
12196 record_buf_mem[1] = u_regval + (immed_5 * 4);
12197 thumb_insn_r->mem_rec_count = 1;
12198 }
12199
12200 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
12201 MEM_ALLOC (thumb_insn_r->arm_mems, thumb_insn_r->mem_rec_count,
12202 record_buf_mem);
12203
12204 return 0;
12205 }
12206
12207 /* Handling opcode 100 insns. */
12208
12209 static int
12210 thumb_record_ld_st_stack (insn_decode_record *thumb_insn_r)
12211 {
12212 struct regcache *reg_cache = thumb_insn_r->regcache;
12213 uint32_t record_buf[8], record_buf_mem[8];
12214
12215 uint32_t reg_src1 = 0;
12216 uint32_t opcode = 0, immed_8 = 0, immed_5 = 0;
12217
12218 ULONGEST u_regval = 0;
12219
12220 opcode = bits (thumb_insn_r->arm_insn, 11, 12);
12221
12222 if (3 == opcode)
12223 {
12224 /* LDR(4). */
12225 reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
12226 record_buf[0] = reg_src1;
12227 thumb_insn_r->reg_rec_count = 1;
12228 }
12229 else if (1 == opcode)
12230 {
12231 /* LDRH(1). */
12232 reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
12233 record_buf[0] = reg_src1;
12234 thumb_insn_r->reg_rec_count = 1;
12235 }
12236 else if (2 == opcode)
12237 {
12238 /* STR(3). */
12239 immed_8 = bits (thumb_insn_r->arm_insn, 0, 7);
12240 regcache_raw_read_unsigned (reg_cache, ARM_SP_REGNUM, &u_regval);
12241 record_buf_mem[0] = 4;
12242 record_buf_mem[1] = u_regval + (immed_8 * 4);
12243 thumb_insn_r->mem_rec_count = 1;
12244 }
12245 else if (0 == opcode)
12246 {
12247 /* STRH(1). */
12248 immed_5 = bits (thumb_insn_r->arm_insn, 6, 10);
12249 reg_src1 = bits (thumb_insn_r->arm_insn, 3, 5);
12250 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval);
12251 record_buf_mem[0] = 2;
12252 record_buf_mem[1] = u_regval + (immed_5 * 2);
12253 thumb_insn_r->mem_rec_count = 1;
12254 }
12255
12256 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
12257 MEM_ALLOC (thumb_insn_r->arm_mems, thumb_insn_r->mem_rec_count,
12258 record_buf_mem);
12259
12260 return 0;
12261 }
12262
12263 /* Handling opcode 101 insns. */
12264
static int
thumb_record_misc (insn_decode_record *thumb_insn_r)
{
  /* Record the Thumb "misc" group (bits 15:13 == 101): POP, PUSH,
     BKPT, PC/SP-relative ADD and SP adjustment.  Returns 0 on
     success, -1 when the insn cannot be recorded (BKPT).  */
  struct regcache *reg_cache = thumb_insn_r->regcache;

  uint32_t opcode = 0, opcode1 = 0, opcode2 = 0;
  uint32_t register_bits = 0, register_count = 0;
  uint32_t register_list[8] = {0}, index = 0, start_address = 0;
  uint32_t record_buf[24], record_buf_mem[48];
  uint32_t reg_src1;

  ULONGEST u_regval = 0;

  /* Overlapping opcode fields of increasing width; the widest (most
     specific) fields are matched first below, so e.g. PUSH/POP are
     recognized before the generic opcode == 2 case.  */
  opcode = bits (thumb_insn_r->arm_insn, 11, 12);
  opcode1 = bits (thumb_insn_r->arm_insn, 8, 12);
  opcode2 = bits (thumb_insn_r->arm_insn, 9, 12);

  if (14 == opcode2)
    {
      /* POP: every register in the list is written, and SP and CPSR
	 also change.  */
      register_bits = bits (thumb_insn_r->arm_insn, 0, 7);
      while (register_bits)
	{
	  if (register_bits & 0x00000001)
	    record_buf[index++] = register_count;
	  register_bits = register_bits >> 1;
	  register_count++;
	}
      record_buf[index++] = ARM_PS_REGNUM;
      record_buf[index++] = ARM_SP_REGNUM;
      thumb_insn_r->reg_rec_count = index;
    }
  else if (10 == opcode2)
    {
      /* PUSH: record the words stored below the current SP, plus SP
	 itself (decremented by the push).  Bit 8 is the R bit: LR is
	 pushed in addition to the register list.  */
      register_bits = bits (thumb_insn_r->arm_insn, 0, 7);
      regcache_raw_read_unsigned (reg_cache, ARM_SP_REGNUM, &u_regval);
      while (register_bits)
	{
	  if (register_bits & 0x00000001)
	    register_count++;
	  register_bits = register_bits >> 1;
	}
      start_address = u_regval - \
	(4 * (bit (thumb_insn_r->arm_insn, 8) + register_count));
      /* NOTE(review): when the R bit is set LR occupies one more
	 stack slot than register_count covers, so the highest stored
	 word appears not to be recorded -- confirm.  */
      thumb_insn_r->mem_rec_count = register_count;
      /* Fill (length, address) pairs from the end of the buffer
	 backwards, walking addresses upwards.  */
      while (register_count)
	{
	  record_buf_mem[(register_count * 2) - 1] = start_address;
	  record_buf_mem[(register_count * 2) - 2] = 4;
	  start_address = start_address + 4;
	  register_count--;
	}
      record_buf[0] = ARM_SP_REGNUM;
      thumb_insn_r->reg_rec_count = 1;
    }
  else if (0x1E == opcode1)
    {
      /* BKPT insn.  */
      /* Handle enhanced software breakpoint insn, BKPT.  */
      /* CPSR is changed to be executed in ARM state, disabling normal
	 interrupts, entering abort mode.  */
      /* According to high vector configuration PC is set.  */
      /* User hits breakpoint and type reverse, in that case, we need to go back with
	 previous CPSR and Program Counter.  */
      record_buf[0] = ARM_PS_REGNUM;
      record_buf[1] = ARM_LR_REGNUM;
      thumb_insn_r->reg_rec_count = 2;
      /* We need to save SPSR value, which is not yet done.  */
      printf_unfiltered (_("Process record does not support instruction "
			   "0x%0x at address %s.\n"),
			 thumb_insn_r->arm_insn,
			 paddress (thumb_insn_r->gdbarch,
				   thumb_insn_r->this_addr));
      return -1;
    }
  else if ((0 == opcode) || (1 == opcode))
    {
      /* ADD(5), ADD(6): PC/SP-relative address generation writes only
	 Rd (bits 8-10).  */
      reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
      record_buf[0] = reg_src1;
      thumb_insn_r->reg_rec_count = 1;
    }
  else if (2 == opcode)
    {
      /* ADD(7), SUB(4): SP adjustment modifies only SP.  */
      reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
      record_buf[0] = ARM_SP_REGNUM;
      thumb_insn_r->reg_rec_count = 1;
    }

  REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
  MEM_ALLOC (thumb_insn_r->arm_mems, thumb_insn_r->mem_rec_count,
	     record_buf_mem);

  return 0;
}
12362
12363 /* Handling opcode 110 insns. */
12364
static int
thumb_record_ldm_stm_swi (insn_decode_record *thumb_insn_r)
{
  /* Record LDMIA/STMIA multiple transfers and the SWI encoding of the
     Thumb "110" group.  Conditional branches B(1) also decode here
     but need no extra recording: the PC is always saved by
     arm_process_record.  Returns 0 on success, -1 on failure.  */
  struct gdbarch_tdep *tdep = gdbarch_tdep (thumb_insn_r->gdbarch);
  struct regcache *reg_cache = thumb_insn_r->regcache;

  uint32_t ret = 0;        /* function return value: -1:record failure ;  0:success  */
  uint32_t reg_src1 = 0;
  uint32_t opcode1 = 0, opcode2 = 0, register_bits = 0, register_count = 0;
  uint32_t register_list[8] = {0}, index = 0, start_address = 0;
  uint32_t record_buf[24], record_buf_mem[48];

  ULONGEST u_regval = 0;

  opcode1 = bits (thumb_insn_r->arm_insn, 8, 12);
  opcode2 = bits (thumb_insn_r->arm_insn, 11, 12);

  if (1 == opcode2)
    {

      /* LDMIA: every listed register and the (write-back) base Rn
	 change.  */
      register_bits = bits (thumb_insn_r->arm_insn, 0, 7);
      /* Get Rn.  */
      reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
      while (register_bits)
	{
	  if (register_bits & 0x00000001)
	    record_buf[index++] = register_count;
	  register_bits = register_bits >> 1;
	  register_count++;
	}
      record_buf[index++] = reg_src1;
      thumb_insn_r->reg_rec_count = index;
    }
  else if (0 == opcode2)
    {
      /* It handles both STMIA.  Words are stored upwards starting at
	 the base address read from Rn.  */
      register_bits = bits (thumb_insn_r->arm_insn, 0, 7);
      /* Get Rn.  */
      reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
      regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval);
      while (register_bits)
	{
	  if (register_bits & 0x00000001)
	    register_count++;
	  register_bits = register_bits >> 1;
	}
      start_address = u_regval;
      thumb_insn_r->mem_rec_count = register_count;
      /* Fill (length, address) pairs from the end of the buffer
	 backwards, walking addresses upwards.  */
      while (register_count)
	{
	  record_buf_mem[(register_count * 2) - 1] = start_address;
	  record_buf_mem[(register_count * 2) - 2] = 4;
	  start_address = start_address + 4;
	  register_count--;
	}
      /* NOTE(review): STMIA write-back also updates Rn, which is not
	 recorded here -- confirm.  */
    }
  else if (0x1F == opcode1)
    {
      /* Handle arm syscall insn.  Delegated to the OSABI-specific
	 recorder, which receives the value of r7 (presumably the
	 syscall number -- verify against the OSABI handler).  */
      if (tdep->arm_syscall_record != NULL)
	{
	  regcache_raw_read_unsigned (reg_cache, 7, &u_regval);
	  ret = tdep->arm_syscall_record (reg_cache, u_regval);
	}
      else
	{
	  printf_unfiltered (_("no syscall record support\n"));
	  return -1;
	}
    }

  /* B (1), conditional branch is automatically taken care in process_record,
     as PC is saved there.  */

  REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
  MEM_ALLOC (thumb_insn_r->arm_mems, thumb_insn_r->mem_rec_count,
	     record_buf_mem);

  return ret;
}
12446
12447 /* Handling opcode 111 insns. */
12448
12449 static int
12450 thumb_record_branch (insn_decode_record *thumb_insn_r)
12451 {
12452 uint32_t record_buf[8];
12453 uint32_t bits_h = 0;
12454
12455 bits_h = bits (thumb_insn_r->arm_insn, 11, 12);
12456
12457 if (2 == bits_h || 3 == bits_h)
12458 {
12459 /* BL */
12460 record_buf[0] = ARM_LR_REGNUM;
12461 thumb_insn_r->reg_rec_count = 1;
12462 }
12463 else if (1 == bits_h)
12464 {
12465 /* BLX(1). */
12466 record_buf[0] = ARM_PS_REGNUM;
12467 record_buf[1] = ARM_LR_REGNUM;
12468 thumb_insn_r->reg_rec_count = 2;
12469 }
12470
12471 /* B(2) is automatically taken care in process_record, as PC is
12472 saved there. */
12473
12474 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
12475
12476 return 0;
12477 }
12478
12479 /* Handler for thumb2 load/store multiple instructions. */
12480
static int
thumb2_record_ld_st_multiple (insn_decode_record *thumb2_insn_r)
{
  /* Record Thumb-2 load/store multiple (and RFE/SRS) insns.  The op
     field (bits 23-24) selects the addressing mode and the
     INSN_S_L_BIT_NUM bit selects load vs. store.  Returns
     ARM_RECORD_SUCCESS, or the result of arm_record_unsupported_insn
     for SRS.  */
  struct regcache *reg_cache = thumb2_insn_r->regcache;

  uint32_t reg_rn, op;
  uint32_t register_bits = 0, register_count = 0;
  uint32_t index = 0, start_address = 0;
  uint32_t record_buf[24], record_buf_mem[48];

  ULONGEST u_regval = 0;

  reg_rn = bits (thumb2_insn_r->arm_insn, 16, 19);
  op = bits (thumb2_insn_r->arm_insn, 23, 24);

  if (0 == op || 3 == op)
    {
      if (bit (thumb2_insn_r->arm_insn, INSN_S_L_BIT_NUM))
	{
	  /* Handle RFE instruction.  */
	  record_buf[0] = ARM_PS_REGNUM;
	  thumb2_insn_r->reg_rec_count = 1;
	}
      else
	{
	  /* Handle SRS instruction after reading banked SP.  */
	  return arm_record_unsupported_insn (thumb2_insn_r);
	}
    }
  else if (1 == op || 2 == op)
    {
      if (bit (thumb2_insn_r->arm_insn, INSN_S_L_BIT_NUM))
	{
	  /* Handle LDM/LDMIA/LDMFD and LDMDB/LDMEA instructions.
	     Every listed register, the (write-back) base Rn and CPSR
	     may change.  */
	  register_bits = bits (thumb2_insn_r->arm_insn, 0, 15);
	  while (register_bits)
	    {
	      if (register_bits & 0x00000001)
		record_buf[index++] = register_count;

	      register_count++;
	      register_bits = register_bits >> 1;
	    }
	  record_buf[index++] = reg_rn;
	  record_buf[index++] = ARM_PS_REGNUM;
	  thumb2_insn_r->reg_rec_count = index;
	}
      else
	{
	  /* Handle STM/STMIA/STMEA and STMDB/STMFD.  */
	  register_bits = bits (thumb2_insn_r->arm_insn, 0, 15);
	  regcache_raw_read_unsigned (reg_cache, reg_rn, &u_regval);
	  while (register_bits)
	    {
	      if (register_bits & 0x00000001)
		register_count++;

	      register_bits = register_bits >> 1;
	    }

	  if (1 == op)
	    {
	      /* Start address calculation for STM/STMIA/STMEA:
		 increment-after stores begin at the base address.  */
	      start_address = u_regval;
	    }
	  else if (2 == op)
	    {
	      /* Start address calculation for STMDB/STMFD:
		 decrement-before stores begin below the base.  */
	      start_address = u_regval - register_count * 4;
	    }

	  /* Record each stored word as a (length, address) pair,
	     filled from the end of the buffer backwards.  */
	  thumb2_insn_r->mem_rec_count = register_count;
	  while (register_count)
	    {
	      record_buf_mem[register_count * 2 - 1] = start_address;
	      record_buf_mem[register_count * 2 - 2] = 4;
	      start_address = start_address + 4;
	      register_count--;
	    }
	  /* Base register (write-back) and flags.  */
	  record_buf[0] = reg_rn;
	  record_buf[1] = ARM_PS_REGNUM;
	  thumb2_insn_r->reg_rec_count = 2;
	}
    }

  MEM_ALLOC (thumb2_insn_r->arm_mems, thumb2_insn_r->mem_rec_count,
	     record_buf_mem);
  REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
	     record_buf);
  return ARM_RECORD_SUCCESS;
}
12572
12573 /* Handler for thumb2 load/store (dual/exclusive) and table branch
12574 instructions. */
12575
static int
thumb2_record_ld_st_dual_ex_tbb (insn_decode_record *thumb2_insn_r)
{
  /* Record Thumb-2 load/store dual, load/store exclusive and table
     branch encodings.  op1 (bits 23-24), op2 (bits 20-21) and op3
     (bits 4-7) together select the individual instruction; the
     INSN_S_L_BIT_NUM bit separates loads from stores.  */
  struct regcache *reg_cache = thumb2_insn_r->regcache;

  uint32_t reg_rd, reg_rn, offset_imm;
  uint32_t reg_dest1, reg_dest2;
  uint32_t address, offset_addr;
  uint32_t record_buf[8], record_buf_mem[8];
  uint32_t op1, op2, op3;
  LONGEST s_word;  /* NOTE(review): unused.  */

  ULONGEST u_regval[2];

  op1 = bits (thumb2_insn_r->arm_insn, 23, 24);
  op2 = bits (thumb2_insn_r->arm_insn, 20, 21);
  op3 = bits (thumb2_insn_r->arm_insn, 4, 7);

  if (bit (thumb2_insn_r->arm_insn, INSN_S_L_BIT_NUM))
    {
      /* Load forms: only destination registers (and flags) change.  */
      if(!(1 == op1 && 1 == op2 && (0 == op3 || 1 == op3)))
	{
	  reg_dest1 = bits (thumb2_insn_r->arm_insn, 12, 15);
	  record_buf[0] = reg_dest1;
	  record_buf[1] = ARM_PS_REGNUM;
	  thumb2_insn_r->reg_rec_count = 2;
	}

      if (3 == op2 || (op1 & 2) || (1 == op1 && 1 == op2 && 7 == op3))
	{
	  /* Dual-load forms also write a second destination register
	     (bits 8-11).  */
	  reg_dest2 = bits (thumb2_insn_r->arm_insn, 8, 11);
	  record_buf[2] = reg_dest2;
	  thumb2_insn_r->reg_rec_count = 3;
	}
    }
  else
    {
      /* Store forms: memory at the address derived from Rn changes.  */
      reg_rn = bits (thumb2_insn_r->arm_insn, 16, 19);
      regcache_raw_read_unsigned (reg_cache, reg_rn, &u_regval[0]);

      if (0 == op1 && 0 == op2)
	{
	  /* Handle STREX: one word at Rn + imm8 * 4, plus the status
	     result register.  */
	  offset_imm = bits (thumb2_insn_r->arm_insn, 0, 7);
	  address = u_regval[0] + (offset_imm * 4);
	  record_buf_mem[0] = 4;
	  record_buf_mem[1] = address;
	  thumb2_insn_r->mem_rec_count = 1;
	  /* NOTE(review): STREX (T1) encodes its status register Rd
	     in bits 8-11; bits 0-3 here look suspicious -- confirm.  */
	  reg_rd = bits (thumb2_insn_r->arm_insn, 0, 3);
	  record_buf[0] = reg_rd;
	  thumb2_insn_r->reg_rec_count = 1;
	}
      else if (1 == op1 && 0 == op2)
	{
	  /* STREXB/STREXH/STREXD: status register in bits 0-3; the
	     store address is the unmodified base.  */
	  reg_rd = bits (thumb2_insn_r->arm_insn, 0, 3);
	  record_buf[0] = reg_rd;
	  thumb2_insn_r->reg_rec_count = 1;
	  address = u_regval[0];
	  record_buf_mem[1] = address;

	  if (4 == op3)
	    {
	      /* Handle STREXB.  */
	      record_buf_mem[0] = 1;
	      thumb2_insn_r->mem_rec_count = 1;
	    }
	  else if (5 == op3)
	    {
	      /* Handle STREXH.  */
	      record_buf_mem[0] = 2 ;
	      thumb2_insn_r->mem_rec_count = 1;
	    }
	  else if (7 == op3)
	    {
	      /* Handle STREXD: two consecutive words.  */
	      address = u_regval[0];
	      record_buf_mem[0] = 4;
	      record_buf_mem[2] = 4;
	      record_buf_mem[3] = address + 4;
	      thumb2_insn_r->mem_rec_count = 2;
	    }
	}
      else
	{
	  /* Dual store: two words at Rn +/- imm8 * 4.  Bit 24 selects
	     indexed addressing, bit 23 the offset direction.  */
	  offset_imm = bits (thumb2_insn_r->arm_insn, 0, 7);

	  if (bit (thumb2_insn_r->arm_insn, 24))
	    {
	      if (bit (thumb2_insn_r->arm_insn, 23))
		offset_addr = u_regval[0] + (offset_imm * 4);
	      else
		offset_addr = u_regval[0] - (offset_imm * 4);

	      address = offset_addr;
	    }
	  else
	    address = u_regval[0];

	  record_buf_mem[0] = 4;
	  record_buf_mem[1] = address;
	  record_buf_mem[2] = 4;
	  record_buf_mem[3] = address + 4;
	  thumb2_insn_r->mem_rec_count = 2;
	  /* The base register may be written back.  */
	  record_buf[0] = reg_rn;
	  thumb2_insn_r->reg_rec_count = 1;
	}
    }

  REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
	     record_buf);
  MEM_ALLOC (thumb2_insn_r->arm_mems, thumb2_insn_r->mem_rec_count,
	     record_buf_mem);
  return ARM_RECORD_SUCCESS;
}
12690
12691 /* Handler for thumb2 data processing (shift register and modified immediate)
12692 instructions. */
12693
12694 static int
12695 thumb2_record_data_proc_sreg_mimm (insn_decode_record *thumb2_insn_r)
12696 {
12697 uint32_t reg_rd, op;
12698 uint32_t record_buf[8];
12699
12700 op = bits (thumb2_insn_r->arm_insn, 21, 24);
12701 reg_rd = bits (thumb2_insn_r->arm_insn, 8, 11);
12702
12703 if ((0 == op || 4 == op || 8 == op || 13 == op) && 15 == reg_rd)
12704 {
12705 record_buf[0] = ARM_PS_REGNUM;
12706 thumb2_insn_r->reg_rec_count = 1;
12707 }
12708 else
12709 {
12710 record_buf[0] = reg_rd;
12711 record_buf[1] = ARM_PS_REGNUM;
12712 thumb2_insn_r->reg_rec_count = 2;
12713 }
12714
12715 REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
12716 record_buf);
12717 return ARM_RECORD_SUCCESS;
12718 }
12719
12720 /* Generic handler for thumb2 instructions which effect destination and PS
12721 registers. */
12722
12723 static int
12724 thumb2_record_ps_dest_generic (insn_decode_record *thumb2_insn_r)
12725 {
12726 uint32_t reg_rd;
12727 uint32_t record_buf[8];
12728
12729 reg_rd = bits (thumb2_insn_r->arm_insn, 8, 11);
12730
12731 record_buf[0] = reg_rd;
12732 record_buf[1] = ARM_PS_REGNUM;
12733 thumb2_insn_r->reg_rec_count = 2;
12734
12735 REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
12736 record_buf);
12737 return ARM_RECORD_SUCCESS;
12738 }
12739
12740 /* Handler for thumb2 branch and miscellaneous control instructions. */
12741
12742 static int
12743 thumb2_record_branch_misc_cntrl (insn_decode_record *thumb2_insn_r)
12744 {
12745 uint32_t op, op1, op2;
12746 uint32_t record_buf[8];
12747
12748 op = bits (thumb2_insn_r->arm_insn, 20, 26);
12749 op1 = bits (thumb2_insn_r->arm_insn, 12, 14);
12750 op2 = bits (thumb2_insn_r->arm_insn, 8, 11);
12751
12752 /* Handle MSR insn. */
12753 if (!(op1 & 0x2) && 0x38 == op)
12754 {
12755 if (!(op2 & 0x3))
12756 {
12757 /* CPSR is going to be changed. */
12758 record_buf[0] = ARM_PS_REGNUM;
12759 thumb2_insn_r->reg_rec_count = 1;
12760 }
12761 else
12762 {
12763 arm_record_unsupported_insn(thumb2_insn_r);
12764 return -1;
12765 }
12766 }
12767 else if (4 == (op1 & 0x5) || 5 == (op1 & 0x5))
12768 {
12769 /* BLX. */
12770 record_buf[0] = ARM_PS_REGNUM;
12771 record_buf[1] = ARM_LR_REGNUM;
12772 thumb2_insn_r->reg_rec_count = 2;
12773 }
12774
12775 REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
12776 record_buf);
12777 return ARM_RECORD_SUCCESS;
12778 }
12779
12780 /* Handler for thumb2 store single data item instructions. */
12781
static int
thumb2_record_str_single_data (insn_decode_record *thumb2_insn_r)
{
  /* Record Thumb-2 single-store insns (STRB/STRH/STR): compute the
     effective address and log the bytes that will be overwritten,
     plus the base register for possible write-back.  op1 (bits 21-23)
     selects the access size; bit 23 and op2 (bits 6-11) select the
     addressing form.  */
  struct regcache *reg_cache = thumb2_insn_r->regcache;

  uint32_t reg_rn, reg_rm, offset_imm, shift_imm;
  uint32_t address, offset_addr;
  uint32_t record_buf[8], record_buf_mem[8];
  uint32_t op1, op2;

  ULONGEST u_regval[2];

  op1 = bits (thumb2_insn_r->arm_insn, 21, 23);
  op2 = bits (thumb2_insn_r->arm_insn, 6, 11);
  reg_rn = bits (thumb2_insn_r->arm_insn, 16, 19);
  regcache_raw_read_unsigned (reg_cache, reg_rn, &u_regval[0]);

  /* NOTE(review): the T2/T3 encoding labels below may be swapped
     relative to the ARM ARM names -- confirm.  */
  if (bit (thumb2_insn_r->arm_insn, 23))
    {
      /* T2 encoding: 12-bit unsigned immediate offset.  */
      offset_imm = bits (thumb2_insn_r->arm_insn, 0, 11);
      offset_addr = u_regval[0] + offset_imm;
      address = offset_addr;
    }
  else
    {
      /* T3 encoding.  */
      if ((0 == op1 || 1 == op1 || 2 == op1) && !(op2 & 0x20))
	{
	  /* Handle STRB (register): offset is Rm shifted left by
	     imm2.  */
	  reg_rm = bits (thumb2_insn_r->arm_insn, 0, 3);
	  regcache_raw_read_unsigned (reg_cache, reg_rm, &u_regval[1]);
	  shift_imm = bits (thumb2_insn_r->arm_insn, 4, 5);
	  offset_addr = u_regval[1] << shift_imm;
	  address = u_regval[0] + offset_addr;
	}
      else
	{
	  /* 8-bit immediate; bit 10 selects indexed addressing, bit 9
	     the offset direction.  */
	  offset_imm = bits (thumb2_insn_r->arm_insn, 0, 7);
	  if (bit (thumb2_insn_r->arm_insn, 10))
	    {
	      if (bit (thumb2_insn_r->arm_insn, 9))
		offset_addr = u_regval[0] + offset_imm;
	      else
		offset_addr = u_regval[0] - offset_imm;

	      address = offset_addr;
	    }
	  else
	    address = u_regval[0];
	}
    }

  /* Access size is determined by op1.  */
  switch (op1)
    {
      /* Store byte instructions.  */
      case 4:
      case 0:
	record_buf_mem[0] = 1;
	break;
      /* Store half word instructions.  */
      case 1:
      case 5:
	record_buf_mem[0] = 2;
	break;
      /* Store word instructions.  */
      case 2:
      case 6:
	record_buf_mem[0] = 4;
	break;

      default:
	gdb_assert_not_reached ("no decoding pattern found");
	break;
    }

  record_buf_mem[1] = address;
  thumb2_insn_r->mem_rec_count = 1;
  /* Base register recorded for possible write-back.  */
  record_buf[0] = reg_rn;
  thumb2_insn_r->reg_rec_count = 1;

  REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
	     record_buf);
  MEM_ALLOC (thumb2_insn_r->arm_mems, thumb2_insn_r->mem_rec_count,
	     record_buf_mem);
  return ARM_RECORD_SUCCESS;
}
12869
12870 /* Handler for thumb2 load memory hints instructions. */
12871
12872 static int
12873 thumb2_record_ld_mem_hints (insn_decode_record *thumb2_insn_r)
12874 {
12875 uint32_t record_buf[8];
12876 uint32_t reg_rt, reg_rn;
12877
12878 reg_rt = bits (thumb2_insn_r->arm_insn, 12, 15);
12879 reg_rn = bits (thumb2_insn_r->arm_insn, 16, 19);
12880
12881 if (ARM_PC_REGNUM != reg_rt)
12882 {
12883 record_buf[0] = reg_rt;
12884 record_buf[1] = reg_rn;
12885 record_buf[2] = ARM_PS_REGNUM;
12886 thumb2_insn_r->reg_rec_count = 3;
12887
12888 REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
12889 record_buf);
12890 return ARM_RECORD_SUCCESS;
12891 }
12892
12893 return ARM_RECORD_FAILURE;
12894 }
12895
12896 /* Handler for thumb2 load word instructions. */
12897
12898 static int
12899 thumb2_record_ld_word (insn_decode_record *thumb2_insn_r)
12900 {
12901 uint32_t opcode1 = 0, opcode2 = 0;
12902 uint32_t record_buf[8];
12903
12904 record_buf[0] = bits (thumb2_insn_r->arm_insn, 12, 15);
12905 record_buf[1] = ARM_PS_REGNUM;
12906 thumb2_insn_r->reg_rec_count = 2;
12907
12908 REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
12909 record_buf);
12910 return ARM_RECORD_SUCCESS;
12911 }
12912
12913 /* Handler for thumb2 long multiply, long multiply accumulate, and
12914 divide instructions. */
12915
12916 static int
12917 thumb2_record_lmul_lmla_div (insn_decode_record *thumb2_insn_r)
12918 {
12919 uint32_t opcode1 = 0, opcode2 = 0;
12920 uint32_t record_buf[8];
12921 uint32_t reg_src1 = 0;
12922
12923 opcode1 = bits (thumb2_insn_r->arm_insn, 20, 22);
12924 opcode2 = bits (thumb2_insn_r->arm_insn, 4, 7);
12925
12926 if (0 == opcode1 || 2 == opcode1 || (opcode1 >= 4 && opcode1 <= 6))
12927 {
12928 /* Handle SMULL, UMULL, SMULAL. */
12929 /* Handle SMLAL(S), SMULL(S), UMLAL(S), UMULL(S). */
12930 record_buf[0] = bits (thumb2_insn_r->arm_insn, 16, 19);
12931 record_buf[1] = bits (thumb2_insn_r->arm_insn, 12, 15);
12932 record_buf[2] = ARM_PS_REGNUM;
12933 thumb2_insn_r->reg_rec_count = 3;
12934 }
12935 else if (1 == opcode1 || 3 == opcode2)
12936 {
12937 /* Handle SDIV and UDIV. */
12938 record_buf[0] = bits (thumb2_insn_r->arm_insn, 16, 19);
12939 record_buf[1] = bits (thumb2_insn_r->arm_insn, 12, 15);
12940 record_buf[2] = ARM_PS_REGNUM;
12941 thumb2_insn_r->reg_rec_count = 3;
12942 }
12943 else
12944 return ARM_RECORD_FAILURE;
12945
12946 REG_ALLOC (thumb2_insn_r->arm_regs, thumb2_insn_r->reg_rec_count,
12947 record_buf);
12948 return ARM_RECORD_SUCCESS;
12949 }
12950
12951 /* Decodes thumb2 instruction type and invokes its record handler. */
12952
static unsigned int
thumb2_record_decode_insn_handler (insn_decode_record *thumb2_insn_r)
{
  /* Classify a 32-bit Thumb-2 insn by its op/op1/op2 fields and
     dispatch to the matching record handler.  Returns the handler's
     status, or -1 for an opcode pattern with no handler.
     NOTE(review): the return type is unsigned yet -1 is returned on
     failure; callers only compare against ARM_RECORD_SUCCESS, so this
     works, but a signed type would be clearer.  */
  uint32_t op, op1, op2;

  op = bit (thumb2_insn_r->arm_insn, 15);
  op1 = bits (thumb2_insn_r->arm_insn, 27, 28);
  op2 = bits (thumb2_insn_r->arm_insn, 20, 26);

  if (op1 == 0x01)
    {
      if (!(op2 & 0x64 ))
	{
	  /* Load/store multiple instruction.  */
	  return thumb2_record_ld_st_multiple (thumb2_insn_r);
	}
      else if (!((op2 & 0x64) ^ 0x04))
	{
	  /* Load/store (dual/exclusive) and table branch instruction.  */
	  return thumb2_record_ld_st_dual_ex_tbb (thumb2_insn_r);
	}
      else if (!((op2 & 0x20) ^ 0x20))
	{
	  /* Data-processing (shifted register).  */
	  return thumb2_record_data_proc_sreg_mimm (thumb2_insn_r);
	}
      else if (op2 & 0x40)
	{
	  /* Co-processor instructions.  */
	  /* NOTE(review): the result is dropped here; control falls
	     through to the -1 return at the bottom -- confirm that is
	     intended (the other co-processor branch returns).  */
	  arm_record_unsupported_insn (thumb2_insn_r);
	}
    }
  else if (op1 == 0x02)
    {
      if (op)
	{
	  /* Branches and miscellaneous control instructions.  */
	  return thumb2_record_branch_misc_cntrl (thumb2_insn_r);
	}
      else if (op2 & 0x20)
	{
	  /* Data-processing (plain binary immediate) instruction.  */
	  return thumb2_record_ps_dest_generic (thumb2_insn_r);
	}
      else
	{
	  /* Data-processing (modified immediate).  */
	  return thumb2_record_data_proc_sreg_mimm (thumb2_insn_r);
	}
    }
  else if (op1 == 0x03)
    {
      if (!(op2 & 0x71 ))
	{
	  /* Store single data item.  */
	  return thumb2_record_str_single_data (thumb2_insn_r);
	}
      else if (!((op2 & 0x71) ^ 0x10))
	{
	  /* Advanced SIMD or structure load/store instructions.  */
	  return arm_record_unsupported_insn (thumb2_insn_r);
	}
      else if (!((op2 & 0x67) ^ 0x01))
	{
	  /* Load byte, memory hints instruction.  */
	  return thumb2_record_ld_mem_hints (thumb2_insn_r);
	}
      else if (!((op2 & 0x67) ^ 0x03))
	{
	  /* Load halfword, memory hints instruction.  */
	  return thumb2_record_ld_mem_hints (thumb2_insn_r);
	}
      else if (!((op2 & 0x67) ^ 0x05))
	{
	  /* Load word instruction.  */
	  return thumb2_record_ld_word (thumb2_insn_r);
	}
      else if (!((op2 & 0x70) ^ 0x20))
	{
	  /* Data-processing (register) instruction.  */
	  return thumb2_record_ps_dest_generic (thumb2_insn_r);
	}
      else if (!((op2 & 0x78) ^ 0x30))
	{
	  /* Multiply, multiply accumulate, abs diff instruction.  */
	  return thumb2_record_ps_dest_generic (thumb2_insn_r);
	}
      else if (!((op2 & 0x78) ^ 0x38))
	{
	  /* Long multiply, long multiply accumulate, and divide.  */
	  return thumb2_record_lmul_lmla_div (thumb2_insn_r);
	}
      else if (op2 & 0x40)
	{
	  /* Co-processor instructions.  */
	  return arm_record_unsupported_insn (thumb2_insn_r);
	}
    }

  return -1;
}
13054
/* Extract an arm/thumb/thumb2 insn depending on the size, and return 0 on
   success or a positive value on failure.  */
13057
13058 static int
13059 extract_arm_insn (insn_decode_record *insn_record, uint32_t insn_size)
13060 {
13061 gdb_byte buf[insn_size];
13062
13063 memset (&buf[0], 0, insn_size);
13064
13065 if (target_read_memory (insn_record->this_addr, &buf[0], insn_size))
13066 return 1;
13067 insn_record->arm_insn = (uint32_t) extract_unsigned_integer (&buf[0],
13068 insn_size,
13069 gdbarch_byte_order (insn_record->gdbarch));
13070 return 0;
13071 }
13072
13073 typedef int (*sti_arm_hdl_fp_t) (insn_decode_record*);
13074
/* Decode an arm/thumb insn depending on condition codes and opcodes, and
   dispatch it to the matching record handler.  */
13077
13078 static int
13079 decode_insn (insn_decode_record *arm_record, record_type_t record_type,
13080 uint32_t insn_size)
13081 {
13082
13083 /* (Starting from numerical 0); bits 25, 26, 27 decodes type of arm instruction. */
13084 static const sti_arm_hdl_fp_t const arm_handle_insn[8] =
13085 {
13086 arm_record_data_proc_misc_ld_str, /* 000. */
13087 arm_record_data_proc_imm, /* 001. */
13088 arm_record_ld_st_imm_offset, /* 010. */
13089 arm_record_ld_st_reg_offset, /* 011. */
13090 arm_record_ld_st_multiple, /* 100. */
13091 arm_record_b_bl, /* 101. */
13092 arm_record_unsupported_insn, /* 110. */
13093 arm_record_coproc_data_proc /* 111. */
13094 };
13095
13096 /* (Starting from numerical 0); bits 13,14,15 decodes type of thumb instruction. */
13097 static const sti_arm_hdl_fp_t const thumb_handle_insn[8] =
13098 { \
13099 thumb_record_shift_add_sub, /* 000. */
13100 thumb_record_add_sub_cmp_mov, /* 001. */
13101 thumb_record_ld_st_reg_offset, /* 010. */
13102 thumb_record_ld_st_imm_offset, /* 011. */
13103 thumb_record_ld_st_stack, /* 100. */
13104 thumb_record_misc, /* 101. */
13105 thumb_record_ldm_stm_swi, /* 110. */
13106 thumb_record_branch /* 111. */
13107 };
13108
13109 uint32_t ret = 0; /* return value: negative:failure 0:success. */
13110 uint32_t insn_id = 0;
13111
13112 if (extract_arm_insn (arm_record, insn_size))
13113 {
13114 if (record_debug)
13115 {
13116 printf_unfiltered (_("Process record: error reading memory at "
13117 "addr %s len = %d.\n"),
13118 paddress (arm_record->gdbarch, arm_record->this_addr), insn_size);
13119 }
13120 return -1;
13121 }
13122 else if (ARM_RECORD == record_type)
13123 {
13124 arm_record->cond = bits (arm_record->arm_insn, 28, 31);
13125 insn_id = bits (arm_record->arm_insn, 25, 27);
13126 ret = arm_record_extension_space (arm_record);
13127 /* If this insn has fallen into extension space
13128 then we need not decode it anymore. */
13129 if (ret != -1 && !INSN_RECORDED(arm_record))
13130 {
13131 ret = arm_handle_insn[insn_id] (arm_record);
13132 }
13133 }
13134 else if (THUMB_RECORD == record_type)
13135 {
13136 /* As thumb does not have condition codes, we set negative. */
13137 arm_record->cond = -1;
13138 insn_id = bits (arm_record->arm_insn, 13, 15);
13139 ret = thumb_handle_insn[insn_id] (arm_record);
13140 }
13141 else if (THUMB2_RECORD == record_type)
13142 {
13143 /* As thumb does not have condition codes, we set negative. */
13144 arm_record->cond = -1;
13145
13146 /* Swap first half of 32bit thumb instruction with second half. */
13147 arm_record->arm_insn
13148 = (arm_record->arm_insn >> 16) | (arm_record->arm_insn << 16);
13149
13150 insn_id = thumb2_record_decode_insn_handler (arm_record);
13151
13152 if (insn_id != ARM_RECORD_SUCCESS)
13153 {
13154 arm_record_unsupported_insn (arm_record);
13155 ret = -1;
13156 }
13157 }
13158 else
13159 {
13160 /* Throw assertion. */
13161 gdb_assert_not_reached ("not a valid instruction, could not decode");
13162 }
13163
13164 return ret;
13165 }
13166
13167
13168 /* Cleans up local record registers and memory allocations. */
13169
13170 static void
13171 deallocate_reg_mem (insn_decode_record *record)
13172 {
13173 xfree (record->arm_regs);
13174 xfree (record->arm_mems);
13175 }
13176
13177
13178 /* Parse the current instruction and record the values of the registers and
13179 memory that will be changed in current instruction to record_arch_list".
13180 Return -1 if something is wrong. */
13181
13182 int
13183 arm_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
13184 CORE_ADDR insn_addr)
13185 {
13186
13187 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
13188 uint32_t no_of_rec = 0;
13189 uint32_t ret = 0; /* return value: -1:record failure ; 0:success */
13190 ULONGEST t_bit = 0, insn_id = 0;
13191
13192 ULONGEST u_regval = 0;
13193
13194 insn_decode_record arm_record;
13195
13196 memset (&arm_record, 0, sizeof (insn_decode_record));
13197 arm_record.regcache = regcache;
13198 arm_record.this_addr = insn_addr;
13199 arm_record.gdbarch = gdbarch;
13200
13201
13202 if (record_debug > 1)
13203 {
13204 fprintf_unfiltered (gdb_stdlog, "Process record: arm_process_record "
13205 "addr = %s\n",
13206 paddress (gdbarch, arm_record.this_addr));
13207 }
13208
13209 if (extract_arm_insn (&arm_record, 2))
13210 {
13211 if (record_debug)
13212 {
13213 printf_unfiltered (_("Process record: error reading memory at "
13214 "addr %s len = %d.\n"),
13215 paddress (arm_record.gdbarch,
13216 arm_record.this_addr), 2);
13217 }
13218 return -1;
13219 }
13220
13221 /* Check the insn, whether it is thumb or arm one. */
13222
13223 t_bit = arm_psr_thumb_bit (arm_record.gdbarch);
13224 regcache_raw_read_unsigned (arm_record.regcache, ARM_PS_REGNUM, &u_regval);
13225
13226
13227 if (!(u_regval & t_bit))
13228 {
13229 /* We are decoding arm insn. */
13230 ret = decode_insn (&arm_record, ARM_RECORD, ARM_INSN_SIZE_BYTES);
13231 }
13232 else
13233 {
13234 insn_id = bits (arm_record.arm_insn, 11, 15);
13235 /* is it thumb2 insn? */
13236 if ((0x1D == insn_id) || (0x1E == insn_id) || (0x1F == insn_id))
13237 {
13238 ret = decode_insn (&arm_record, THUMB2_RECORD,
13239 THUMB2_INSN_SIZE_BYTES);
13240 }
13241 else
13242 {
13243 /* We are decoding thumb insn. */
13244 ret = decode_insn (&arm_record, THUMB_RECORD, THUMB_INSN_SIZE_BYTES);
13245 }
13246 }
13247
13248 if (0 == ret)
13249 {
13250 /* Record registers. */
13251 record_full_arch_list_add_reg (arm_record.regcache, ARM_PC_REGNUM);
13252 if (arm_record.arm_regs)
13253 {
13254 for (no_of_rec = 0; no_of_rec < arm_record.reg_rec_count; no_of_rec++)
13255 {
13256 if (record_full_arch_list_add_reg
13257 (arm_record.regcache , arm_record.arm_regs[no_of_rec]))
13258 ret = -1;
13259 }
13260 }
13261 /* Record memories. */
13262 if (arm_record.arm_mems)
13263 {
13264 for (no_of_rec = 0; no_of_rec < arm_record.mem_rec_count; no_of_rec++)
13265 {
13266 if (record_full_arch_list_add_mem
13267 ((CORE_ADDR)arm_record.arm_mems[no_of_rec].addr,
13268 arm_record.arm_mems[no_of_rec].len))
13269 ret = -1;
13270 }
13271 }
13272
13273 if (record_full_arch_list_add_end ())
13274 ret = -1;
13275 }
13276
13277
13278 deallocate_reg_mem (&arm_record);
13279
13280 return ret;
13281 }
13282