1 /* Target-dependent code for the IA-64 for GDB, the GNU debugger.
2
3 Copyright (C) 1999-2024 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "inferior.h"
22 #include "gdbcore.h"
23 #include "arch-utils.h"
24 #include "floatformat.h"
25 #include "gdbtypes.h"
26 #include "regcache.h"
27 #include "reggroups.h"
28 #include "frame.h"
29 #include "frame-base.h"
30 #include "frame-unwind.h"
31 #include "target-float.h"
32 #include "value.h"
33 #include "objfiles.h"
34 #include "elf/common.h"
35 #include "elf-bfd.h"
36 #include "dis-asm.h"
37 #include "infcall.h"
38 #include "osabi.h"
39 #include "ia64-tdep.h"
40 #include "cp-abi.h"
41
42 #ifdef HAVE_LIBUNWIND_IA64_H
43 #include "elf/ia64.h"
44 #include "ia64-libunwind-tdep.h"
45
46 /* Note: KERNEL_START is supposed to be an address which is not going
47 to ever contain any valid unwind info. For ia64 linux, the choice
48 of 0xc000000000000000 is fairly safe since that's uncached space.
49
50 We use KERNEL_START as follows: after obtaining the kernel's
51 unwind table via getunwind(), we project its unwind data into
52 address-range KERNEL_START-(KERNEL_START+ktab_size) and then
53 when ia64_access_mem() sees a memory access to this
54 address-range, we redirect it to ktab instead.
55
56 None of this hackery is needed with a modern kernel/libc
57 which uses the kernel virtual DSO to provide access to the
58 kernel's unwind info. In that case, ktab_size remains 0 and
59 hence the value of KERNEL_START doesn't matter. */
60
61 #define KERNEL_START 0xc000000000000000ULL
62
63 static size_t ktab_size = 0;
64 struct ia64_table_entry
65 {
66 uint64_t start_offset;
67 uint64_t end_offset;
68 uint64_t info_offset;
69 };
70
71 static struct ia64_table_entry *ktab = NULL;
72 static std::optional<gdb::byte_vector> ktab_buf;
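/* A rough sketch (illustrative only) of the redirection described in the
   comment above; the actual check lives in ia64_access_mem later in this
   file and may differ in detail:

     if (addr - KERNEL_START < ktab_size)
       ... read or write ((char *) ktab + (addr - KERNEL_START))
       instead of target memory ...  */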
73
74 #endif
75
76 /* An enumeration of the different IA-64 instruction types. */
77
78 enum ia64_instruction_type
79 {
80 A, /* Integer ALU ; I-unit or M-unit */
81 I, /* Non-ALU integer; I-unit */
82 M, /* Memory ; M-unit */
83 F, /* Floating-point ; F-unit */
84 B, /* Branch ; B-unit */
85 L, /* Extended (L+X) ; I-unit */
86 X, /* Extended (L+X) ; I-unit */
87 undefined /* undefined or reserved */
88 };
89
90 /* We represent IA-64 PC addresses as the value of the instruction
91 pointer or'd with some bit combination in the low nibble which
92 represents the slot number in the bundle addressed by the
93 instruction pointer. The problem is that the Linux kernel
94 multiplies its slot numbers (for exceptions) by one while the
95 disassembler multiplies its slot numbers by 6. In addition, I've
96 heard it said that the simulator uses 1 as the multiplier.
97
98 I've fixed the disassembler so that the bytes_per_line field will
99 be the slot multiplier. If bytes_per_line comes in as zero, it
100 is set to six (which is the needed slot multiplier for objdump --
101 objdump displays pretty disassembly dumps with this value). For our
102 purposes, we'll set bytes_per_line to SLOT_MULTIPLIER. This is okay
103 since we never want to also display the raw bytes the way objdump does. */
104
105 #define SLOT_MULTIPLIER 1
106
107 /* Length in bytes of an instruction bundle. */
108
109 #define BUNDLE_LEN 16
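/* Illustrative helpers only (hypothetical names, not referenced
   elsewhere): with the slot-encoded PC representation described above,
   a PC value splits into its bundle address and slot number like so.  */

static inline CORE_ADDR
ia64_example_bundle_base (CORE_ADDR pc)
{
  /* E.g. a PC of 0xABCDE2 denotes the bundle at 0xABCDE0...  */
  return pc & ~(CORE_ADDR) 0x0f;
}

static inline int
ia64_example_slotnum (CORE_ADDR pc)
{
  /* ...and slot 2 within it (SLOT_MULTIPLIER is 1 here).  */
  return (int) (pc & 0x0f) / SLOT_MULTIPLIER;
}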
110
111 /* See the saved memory layout comment for ia64_memory_insert_breakpoint. */
112
113 #if BREAKPOINT_MAX < BUNDLE_LEN - 2
114 # error "BREAKPOINT_MAX < BUNDLE_LEN - 2"
115 #endif
116
117 static gdbarch_init_ftype ia64_gdbarch_init;
118
119 static gdbarch_register_name_ftype ia64_register_name;
120 static gdbarch_register_type_ftype ia64_register_type;
121 static gdbarch_breakpoint_from_pc_ftype ia64_breakpoint_from_pc;
122 static gdbarch_skip_prologue_ftype ia64_skip_prologue;
123 static struct type *is_float_or_hfa_type (struct type *t);
124 static CORE_ADDR ia64_find_global_pointer (struct gdbarch *gdbarch,
125 CORE_ADDR faddr);
126
127 #define NUM_IA64_RAW_REGS 462
128
129 /* Big enough to hold a FP register in bytes. */
130 #define IA64_FP_REGISTER_SIZE 16
131
132 static int sp_regnum = IA64_GR12_REGNUM;
133
134 /* NOTE: we treat the register stack registers r32-r127 as
135 pseudo-registers because they may not be accessible via the ptrace
136 register get/set interfaces. */
137
138 enum pseudo_regs { FIRST_PSEUDO_REGNUM = NUM_IA64_RAW_REGS,
139 VBOF_REGNUM = IA64_NAT127_REGNUM + 1, V32_REGNUM,
140 V127_REGNUM = V32_REGNUM + 95,
141 VP0_REGNUM, VP16_REGNUM = VP0_REGNUM + 16,
142 VP63_REGNUM = VP0_REGNUM + 63, LAST_PSEUDO_REGNUM };
143
144 /* Array of register names; there should be ia64_num_regs strings in
145 the initializer. */
146
147 static const char * const ia64_register_names[] =
148 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
149 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
150 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
151 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
152 "", "", "", "", "", "", "", "",
153 "", "", "", "", "", "", "", "",
154 "", "", "", "", "", "", "", "",
155 "", "", "", "", "", "", "", "",
156 "", "", "", "", "", "", "", "",
157 "", "", "", "", "", "", "", "",
158 "", "", "", "", "", "", "", "",
159 "", "", "", "", "", "", "", "",
160 "", "", "", "", "", "", "", "",
161 "", "", "", "", "", "", "", "",
162 "", "", "", "", "", "", "", "",
163 "", "", "", "", "", "", "", "",
164
165 "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
166 "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
167 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
168 "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",
169 "f32", "f33", "f34", "f35", "f36", "f37", "f38", "f39",
170 "f40", "f41", "f42", "f43", "f44", "f45", "f46", "f47",
171 "f48", "f49", "f50", "f51", "f52", "f53", "f54", "f55",
172 "f56", "f57", "f58", "f59", "f60", "f61", "f62", "f63",
173 "f64", "f65", "f66", "f67", "f68", "f69", "f70", "f71",
174 "f72", "f73", "f74", "f75", "f76", "f77", "f78", "f79",
175 "f80", "f81", "f82", "f83", "f84", "f85", "f86", "f87",
176 "f88", "f89", "f90", "f91", "f92", "f93", "f94", "f95",
177 "f96", "f97", "f98", "f99", "f100", "f101", "f102", "f103",
178 "f104", "f105", "f106", "f107", "f108", "f109", "f110", "f111",
179 "f112", "f113", "f114", "f115", "f116", "f117", "f118", "f119",
180 "f120", "f121", "f122", "f123", "f124", "f125", "f126", "f127",
181
182 "", "", "", "", "", "", "", "",
183 "", "", "", "", "", "", "", "",
184 "", "", "", "", "", "", "", "",
185 "", "", "", "", "", "", "", "",
186 "", "", "", "", "", "", "", "",
187 "", "", "", "", "", "", "", "",
188 "", "", "", "", "", "", "", "",
189 "", "", "", "", "", "", "", "",
190
191 "b0", "b1", "b2", "b3", "b4", "b5", "b6", "b7",
192
193 "vfp", "vrap",
194
195 "pr", "ip", "psr", "cfm",
196
197 "kr0", "kr1", "kr2", "kr3", "kr4", "kr5", "kr6", "kr7",
198 "", "", "", "", "", "", "", "",
199 "rsc", "bsp", "bspstore", "rnat",
200 "", "fcr", "", "",
201 "eflag", "csd", "ssd", "cflg", "fsr", "fir", "fdr", "",
202 "ccv", "", "", "", "unat", "", "", "",
203 "fpsr", "", "", "", "itc",
204 "", "", "", "", "", "", "", "", "", "",
205 "", "", "", "", "", "", "", "", "",
206 "pfs", "lc", "ec",
207 "", "", "", "", "", "", "", "", "", "",
208 "", "", "", "", "", "", "", "", "", "",
209 "", "", "", "", "", "", "", "", "", "",
210 "", "", "", "", "", "", "", "", "", "",
211 "", "", "", "", "", "", "", "", "", "",
212 "", "", "", "", "", "", "", "", "", "",
213 "",
214 "nat0", "nat1", "nat2", "nat3", "nat4", "nat5", "nat6", "nat7",
215 "nat8", "nat9", "nat10", "nat11", "nat12", "nat13", "nat14", "nat15",
216 "nat16", "nat17", "nat18", "nat19", "nat20", "nat21", "nat22", "nat23",
217 "nat24", "nat25", "nat26", "nat27", "nat28", "nat29", "nat30", "nat31",
218 "nat32", "nat33", "nat34", "nat35", "nat36", "nat37", "nat38", "nat39",
219 "nat40", "nat41", "nat42", "nat43", "nat44", "nat45", "nat46", "nat47",
220 "nat48", "nat49", "nat50", "nat51", "nat52", "nat53", "nat54", "nat55",
221 "nat56", "nat57", "nat58", "nat59", "nat60", "nat61", "nat62", "nat63",
222 "nat64", "nat65", "nat66", "nat67", "nat68", "nat69", "nat70", "nat71",
223 "nat72", "nat73", "nat74", "nat75", "nat76", "nat77", "nat78", "nat79",
224 "nat80", "nat81", "nat82", "nat83", "nat84", "nat85", "nat86", "nat87",
225 "nat88", "nat89", "nat90", "nat91", "nat92", "nat93", "nat94", "nat95",
226 "nat96", "nat97", "nat98", "nat99", "nat100","nat101","nat102","nat103",
227 "nat104","nat105","nat106","nat107","nat108","nat109","nat110","nat111",
228 "nat112","nat113","nat114","nat115","nat116","nat117","nat118","nat119",
229 "nat120","nat121","nat122","nat123","nat124","nat125","nat126","nat127",
230
231 "bof",
232
233 "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
234 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
235 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
236 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
237 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
238 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
239 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
240 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
241 "r96", "r97", "r98", "r99", "r100", "r101", "r102", "r103",
242 "r104", "r105", "r106", "r107", "r108", "r109", "r110", "r111",
243 "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
244 "r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127",
245
246 "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7",
247 "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15",
248 "p16", "p17", "p18", "p19", "p20", "p21", "p22", "p23",
249 "p24", "p25", "p26", "p27", "p28", "p29", "p30", "p31",
250 "p32", "p33", "p34", "p35", "p36", "p37", "p38", "p39",
251 "p40", "p41", "p42", "p43", "p44", "p45", "p46", "p47",
252 "p48", "p49", "p50", "p51", "p52", "p53", "p54", "p55",
253 "p56", "p57", "p58", "p59", "p60", "p61", "p62", "p63",
254 };
255
256 struct ia64_frame_cache
257 {
258 CORE_ADDR base; /* frame pointer base for frame */
259 CORE_ADDR pc; /* function start pc for frame */
260 CORE_ADDR saved_sp; /* stack pointer for frame */
261 CORE_ADDR bsp; /* points at r32 for the current frame */
262 CORE_ADDR cfm; /* cfm value for current frame */
263 CORE_ADDR prev_cfm; /* cfm value for previous frame */
264 int frameless;
265 int sof; /* Size of frame (decoded from cfm value). */
266 int sol; /* Size of locals (decoded from cfm value). */
267 int sor; /* Number of rotating registers (decoded from
268 cfm value). */
269 CORE_ADDR after_prologue;
270 /* Address of first instruction after the last
271 prologue instruction; Note that there may
272 be instructions from the function's body
273 intermingled with the prologue. */
274 int mem_stack_frame_size;
275 /* Size of the memory stack frame (may be zero),
276 or -1 if it has not been determined yet. */
277 int fp_reg; /* Register number (if any) used as a frame pointer
278 for this frame. 0 if no register is being used
279 as the frame pointer. */
280
281 /* Saved registers. */
282 CORE_ADDR saved_regs[NUM_IA64_RAW_REGS];
283
284 };
285
286 static int
287 floatformat_valid (const struct floatformat *fmt, const void *from)
288 {
289 return 1;
290 }
291
292 static const struct floatformat floatformat_ia64_ext_little =
293 {
294 floatformat_little, 82, 0, 1, 17, 65535, 0x1ffff, 18, 64,
295 floatformat_intbit_yes, "floatformat_ia64_ext_little", floatformat_valid, NULL
296 };
297
298 static const struct floatformat floatformat_ia64_ext_big =
299 {
300 floatformat_big, 82, 46, 47, 17, 65535, 0x1ffff, 64, 64,
301 floatformat_intbit_yes, "floatformat_ia64_ext_big", floatformat_valid
302 };
303
304 static const struct floatformat *floatformats_ia64_ext[2] =
305 {
306 &floatformat_ia64_ext_big,
307 &floatformat_ia64_ext_little
308 };
309
310 static struct type *
311 ia64_ext_type (struct gdbarch *gdbarch)
312 {
313 ia64_gdbarch_tdep *tdep = gdbarch_tdep<ia64_gdbarch_tdep> (gdbarch);
314
315 if (!tdep->ia64_ext_type)
316 {
317 type_allocator alloc (gdbarch);
318 tdep->ia64_ext_type
319 = init_float_type (alloc, 128, "builtin_type_ia64_ext",
320 floatformats_ia64_ext);
321 }
322
323 return tdep->ia64_ext_type;
324 }
325
326 static int
327 ia64_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
328 const struct reggroup *group)
329 {
330 int vector_p;
331 int float_p;
332 int raw_p;
333 if (group == all_reggroup)
334 return 1;
335 vector_p = register_type (gdbarch, regnum)->is_vector ();
336 float_p = register_type (gdbarch, regnum)->code () == TYPE_CODE_FLT;
337 raw_p = regnum < NUM_IA64_RAW_REGS;
338 if (group == float_reggroup)
339 return float_p;
340 if (group == vector_reggroup)
341 return vector_p;
342 if (group == general_reggroup)
343 return (!vector_p && !float_p);
344 if (group == save_reggroup || group == restore_reggroup)
345 return raw_p;
346 return 0;
347 }
348
349 static const char *
350 ia64_register_name (struct gdbarch *gdbarch, int reg)
351 {
352 return ia64_register_names[reg];
353 }
354
355 struct type *
356 ia64_register_type (struct gdbarch *arch, int reg)
357 {
358 if (reg >= IA64_FR0_REGNUM && reg <= IA64_FR127_REGNUM)
359 return ia64_ext_type (arch);
360 else
361 return builtin_type (arch)->builtin_long;
362 }
363
364 static int
365 ia64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
366 {
367 if (reg >= IA64_GR32_REGNUM && reg <= IA64_GR127_REGNUM)
368 return V32_REGNUM + (reg - IA64_GR32_REGNUM);
369 return reg;
370 }
371
372
373 /* Extract ``len'' bits from an instruction bundle starting at
374 bit ``from''. */
375
376 static long long
377 extract_bit_field (const gdb_byte *bundle, int from, int len)
378 {
379 long long result = 0LL;
380 int to = from + len;
381 int from_byte = from / 8;
382 int to_byte = to / 8;
383 unsigned char *b = (unsigned char *) bundle;
384 unsigned char c;
385 int lshift;
386 int i;
387
388 c = b[from_byte];
389 if (from_byte == to_byte)
390 c = ((unsigned char) (c << (8 - to % 8))) >> (8 - to % 8);
391 result = c >> (from % 8);
392 lshift = 8 - (from % 8);
393
394 for (i = from_byte+1; i < to_byte; i++)
395 {
396 result |= ((long long) b[i]) << lshift;
397 lshift += 8;
398 }
399
400 if (from_byte < to_byte && (to % 8 != 0))
401 {
402 c = b[to_byte];
403 c = ((unsigned char) (c << (8 - to % 8))) >> (8 - to % 8);
404 result |= ((long long) c) << lshift;
405 }
406
407 return result;
408 }
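/* A minimal usage sketch (illustrative only): the 5-bit template field
   occupies bits 0..4 of a bundle and slot N occupies bits 5+41*N and up,
   so the template and slot 0 can be pulled out with

     long long templ = extract_bit_field (bundle, 0, 5);
     long long slot0 = extract_bit_field (bundle, 5, 41);

   which is exactly how slotN_contents and fetch_instruction below use
   this function.  */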
409
410 /* Replace the specified bits in an instruction bundle. */
411
412 static void
413 replace_bit_field (gdb_byte *bundle, long long val, int from, int len)
414 {
415 int to = from + len;
416 int from_byte = from / 8;
417 int to_byte = to / 8;
418 unsigned char *b = (unsigned char *) bundle;
419 unsigned char c;
420
421 if (from_byte == to_byte)
422 {
423 unsigned char left, right;
424 c = b[from_byte];
425 left = (c >> (to % 8)) << (to % 8);
426 right = ((unsigned char) (c << (8 - from % 8))) >> (8 - from % 8);
427 c = (unsigned char) (val & 0xff);
428 c = (unsigned char) (c << (from % 8 + 8 - to % 8)) >> (8 - to % 8);
429 c |= right | left;
430 b[from_byte] = c;
431 }
432 else
433 {
434 int i;
435 c = b[from_byte];
436 c = ((unsigned char) (c << (8 - from % 8))) >> (8 - from % 8);
437 c = c | (val << (from % 8));
438 b[from_byte] = c;
439 val >>= 8 - from % 8;
440
441 for (i = from_byte+1; i < to_byte; i++)
442 {
443 c = val & 0xff;
444 val >>= 8;
445 b[i] = c;
446 }
447
448 if (to % 8 != 0)
449 {
450 unsigned char cv = (unsigned char) val;
451 c = b[to_byte];
452 c = c >> (to % 8) << (to % 8);
453 c |= ((unsigned char) (cv << (8 - to % 8))) >> (8 - to % 8);
454 b[to_byte] = c;
455 }
456 }
457 }
458
459 /* Return the contents of slot N (for N = 0, 1, or 2) in
460 an instruction bundle. */
461
462 static long long
463 slotN_contents (gdb_byte *bundle, int slotnum)
464 {
465 return extract_bit_field (bundle, 5+41*slotnum, 41);
466 }
467
468 /* Store an instruction in an instruction bundle. */
469
470 static void
471 replace_slotN_contents (gdb_byte *bundle, long long instr, int slotnum)
472 {
473 replace_bit_field (bundle, instr, 5+41*slotnum, 41);
474 }
475
476 static const enum ia64_instruction_type template_encoding_table[32][3] =
477 {
478 { M, I, I }, /* 00 */
479 { M, I, I }, /* 01 */
480 { M, I, I }, /* 02 */
481 { M, I, I }, /* 03 */
482 { M, L, X }, /* 04 */
483 { M, L, X }, /* 05 */
484 { undefined, undefined, undefined }, /* 06 */
485 { undefined, undefined, undefined }, /* 07 */
486 { M, M, I }, /* 08 */
487 { M, M, I }, /* 09 */
488 { M, M, I }, /* 0A */
489 { M, M, I }, /* 0B */
490 { M, F, I }, /* 0C */
491 { M, F, I }, /* 0D */
492 { M, M, F }, /* 0E */
493 { M, M, F }, /* 0F */
494 { M, I, B }, /* 10 */
495 { M, I, B }, /* 11 */
496 { M, B, B }, /* 12 */
497 { M, B, B }, /* 13 */
498 { undefined, undefined, undefined }, /* 14 */
499 { undefined, undefined, undefined }, /* 15 */
500 { B, B, B }, /* 16 */
501 { B, B, B }, /* 17 */
502 { M, M, B }, /* 18 */
503 { M, M, B }, /* 19 */
504 { undefined, undefined, undefined }, /* 1A */
505 { undefined, undefined, undefined }, /* 1B */
506 { M, F, B }, /* 1C */
507 { M, F, B }, /* 1D */
508 { undefined, undefined, undefined }, /* 1E */
509 { undefined, undefined, undefined }, /* 1F */
510 };
511
512 /* Fetch and (partially) decode an instruction at ADDR and return the
513 address of the next instruction to fetch. */
514
515 static CORE_ADDR
516 fetch_instruction (CORE_ADDR addr, ia64_instruction_type *it, long long *instr)
517 {
518 gdb_byte bundle[BUNDLE_LEN];
519 int slotnum = (int) (addr & 0x0f) / SLOT_MULTIPLIER;
520 long long templ;
521 int val;
522
523 /* Warn about slot numbers greater than 2. We used to generate
524 an error here on the assumption that the user entered an invalid
525 address. But, sometimes GDB itself requests an invalid address.
526 This can (easily) happen when execution stops in a function for
527 which there are no symbols. The prologue scanner will attempt to
528 find the beginning of the function - if the nearest symbol
529 happens to not be aligned on a bundle boundary (16 bytes), the
530 resulting starting address will cause GDB to think that the slot
531 number is too large.
532
533 So we warn about it and set the slot number to zero. It is
534 not necessarily a fatal condition, particularly if debugging
535 at the assembly language level. */
536 if (slotnum > 2)
537 {
538 warning (_("Can't fetch instructions for slot numbers greater than 2.\n"
539 "Using slot 0 instead"));
540 slotnum = 0;
541 }
542
543 addr &= ~0x0f;
544
545 val = target_read_memory (addr, bundle, BUNDLE_LEN);
546
547 if (val != 0)
548 return 0;
549
550 *instr = slotN_contents (bundle, slotnum);
551 templ = extract_bit_field (bundle, 0, 5);
552 *it = template_encoding_table[(int)templ][slotnum];
553
554 if (slotnum == 2 || (slotnum == 1 && *it == L))
555 addr += 16;
556 else
557 addr += (slotnum + 1) * SLOT_MULTIPLIER;
558
559 return addr;
560 }
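/* Illustrative walk (hypothetical start address, not used anywhere):
   with SLOT_MULTIPLIER == 1 and PC initially at slot 0 of a bundle,

     pc = fetch_instruction (pc, &it, &instr);    now at base + 1 (slot 1)
     pc = fetch_instruction (pc, &it, &instr);    now at base + 2 (slot 2)
     pc = fetch_instruction (pc, &it, &instr);    now at base + 16, i.e.
                                                  slot 0 of the next bundle

   except when slot 1 holds the L part of an L+X pair, in which case the
   second call already returns the next bundle's address.  */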
561
562 /* There are 5 different break instructions (break.i, break.b,
563 break.m, break.f, and break.x), but they all have the same
564 encoding. (The five bit template in the low five bits of the
565 instruction bundle distinguishes one from another.)
566
567 The runtime architecture manual specifies that break instructions
568 used for debugging purposes must have the upper two bits of the 21
569 bit immediate set to a 0 and a 1 respectively. A breakpoint
570 instruction encodes the most significant bit of its 21 bit
571 immediate at bit 36 of the 41 bit instruction. The penultimate msb
572 is at bit 25 which leads to the pattern below.
573
574 Originally, I had this set up to do, e.g., a "break.i 0x80000". But
575 it turns out that 0x80000 was used as the syscall break in the early
576 simulators. So I changed the pattern slightly to do "break.i 0x080001"
577 instead. But that didn't work either (I later found out that this
578 pattern was used by the simulator that I was using.) So I ended up
579 using the pattern seen below.
580
581 SHADOW_CONTENTS uses byte-based addressing (PLACED_ADDRESS and SHADOW_LEN),
582 while we need bit-based addressing, as an instruction is 41 bits long and
583 we must not modify/corrupt the adjacent slots in the same bundle.
584 Fortunately we may store a larger memory region, including the adjacent bits,
585 with the original memory content (not with any breakpoints possibly already
586 stored there). We need to be careful in ia64_memory_remove_breakpoint to
587 always restore only the specific bits of this instruction, ignoring any
588 adjacent stored bits.
589
590 We use the original addressing with the low nibble in the range <0..2>, which
591 gets incorrectly interpreted by the generic non-ia64 breakpoint_restore_shadows
592 as a direct byte offset into SHADOW_CONTENTS. We store the whole
593 BUNDLE_LEN bytes, just without the up to two initially skipped bytes,
594 so as not to cross into the next bundle.
595
596 If we wanted to store the whole bundle in SHADOW_CONTENTS, we would have
597 to store the base address (`address & ~0x0f') in PLACED_ADDRESS. In that
598 case there would be no place left to store SLOTNUM (`address & 0x0f', a
599 value in the range <0..2>). We need to know SLOTNUM in
600 ia64_memory_remove_breakpoint.
601
602 There is one special case where we need to be extra careful:
603 L-X instructions, which are instructions that occupy 2 slots
604 (The L part is always in slot 1, and the X part is always in
605 slot 2). We must refuse to insert breakpoints for an address
606 that points at slot 2 of a bundle where an L-X instruction is
607 present, since there is logically no instruction at that address.
608 However, to make things more interesting, the opcode of L-X
609 instructions is located in slot 2. This means that, to insert
610 a breakpoint at an address that points to slot 1, we actually
611 need to write the breakpoint in slot 2! Slot 1 is actually
612 the extended operand, so writing the breakpoint there would not
613 have the desired effect. Another side-effect of this issue
614 is that we need to make sure that the shadow contents buffer
615 does save byte 15 of our instruction bundle (this is the tail
616 end of slot 2, which wouldn't be saved if we were to insert
617 the breakpoint in slot 1).
618
619 ia64 16-byte bundle layout:
620 | 5 bits | slot 0 with 41 bits | slot 1 with 41 bits | slot 2 with 41 bits |
621
622 The current addressing used by the code below:
623 original PC placed_address placed_size required covered
624 == bp_tgt->shadow_len reqd \subset covered
625 0xABCDE0 0xABCDE0 0x10 <0x0...0x5> <0x0..0xF>
626 0xABCDE1 0xABCDE1 0xF <0x5...0xA> <0x1..0xF>
627 0xABCDE2 0xABCDE2 0xE <0xA...0xF> <0x2..0xF>
628
629 L-X instructions are treated a little specially, as explained above:
630 0xABCDE1 0xABCDE1 0xF <0xA...0xF> <0x1..0xF>
631
632 `objdump -d' and some other tools show somewhat unjustified offsets:
633 original PC byte where the instruction starts objdump offset
634 0xABCDE0 0xABCDE0 0xABCDE0
635 0xABCDE1 0xABCDE5 0xABCDE6
636 0xABCDE2 0xABCDEA 0xABCDEC
637 */
638
639 #define IA64_BREAKPOINT 0x00003333300LL
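/* Illustrative summary only: the shadow bookkeeping implemented below
   boils down to (ADDR being the requested slot-encoded PC)

     placed_address = ADDR;
     shadow_slotnum = ADDR & 0x0f;                   0, 1 or 2
     bundle base    = ADDR & ~0x0f;
     shadow_len     = BUNDLE_LEN - shadow_slotnum;

   so a request at 0xABCDE1 covers bytes 0xABCDE1..0xABCDEF, matching the
   table in the comment above.  */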
640
641 static int
642 ia64_memory_insert_breakpoint (struct gdbarch *gdbarch,
643 struct bp_target_info *bp_tgt)
644 {
645 CORE_ADDR addr = bp_tgt->placed_address = bp_tgt->reqstd_address;
646 gdb_byte bundle[BUNDLE_LEN];
647 int slotnum = (int) (addr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
648 long long instr_breakpoint;
649 int val;
650 int templ;
651
652 if (slotnum > 2)
653 error (_("Can't insert breakpoint for slot numbers greater than 2."));
654
655 addr &= ~0x0f;
656
657 /* Enable the automatic memory restoration from breakpoints while
658 we read our instruction bundle for the purpose of SHADOW_CONTENTS.
659 Otherwise, we could possibly store into the shadow parts of the adjacent
660 placed breakpoints. It is due to our SHADOW_CONTENTS overlapping the real
661 breakpoint instruction bits region. */
662 scoped_restore restore_memory_0
663 = make_scoped_restore_show_memory_breakpoints (0);
664 val = target_read_memory (addr, bundle, BUNDLE_LEN);
665 if (val != 0)
666 return val;
667
668 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
669 for addressing the SHADOW_CONTENTS placement. */
670 shadow_slotnum = slotnum;
671
672 /* Always cover the last byte of the bundle in case we are inserting
673 a breakpoint on an L-X instruction. */
674 bp_tgt->shadow_len = BUNDLE_LEN - shadow_slotnum;
675
676 templ = extract_bit_field (bundle, 0, 5);
677 if (template_encoding_table[templ][slotnum] == X)
678 {
679 /* X unit types can only be used in slot 2, and are actually
680 part of a 2-slot L-X instruction. We cannot break at this
681 address, as this is the second half of an instruction that
682 lives in slot 1 of that bundle. */
683 gdb_assert (slotnum == 2);
684 error (_("Can't insert breakpoint for non-existing slot X"));
685 }
686 if (template_encoding_table[templ][slotnum] == L)
687 {
688 /* L unit types can only be used in slot 1. But the associated
689 opcode for that instruction is in slot 2, so bump the slot number
690 accordingly. */
691 gdb_assert (slotnum == 1);
692 slotnum = 2;
693 }
694
695 /* Store the whole bundle, except for the initial skipped bytes by the slot
696 number interpreted as bytes offset in PLACED_ADDRESS. */
697 memcpy (bp_tgt->shadow_contents, bundle + shadow_slotnum,
698 bp_tgt->shadow_len);
699
700 /* Re-read the same bundle as above except that, this time, read it in order
701 to compute the new bundle inside which we will be inserting the
702 breakpoint. Therefore, disable the automatic memory restoration from
703 breakpoints while we read our instruction bundle. Otherwise, the general
704 restoration mechanism kicks in and we would possibly remove parts of the
705 adjacent placed breakpoints. It is due to our SHADOW_CONTENTS overlapping
706 the real breakpoint instruction bits region. */
707 scoped_restore restore_memory_1
708 = make_scoped_restore_show_memory_breakpoints (1);
709 val = target_read_memory (addr, bundle, BUNDLE_LEN);
710 if (val != 0)
711 return val;
712
713 /* Breakpoints already present in the code will get detected and not get
714 reinserted by bp_loc_is_permanent. Multiple breakpoints at the same
715 location cannot induce the internal error as they are optimized into
716 a single instance by update_global_location_list. */
717 instr_breakpoint = slotN_contents (bundle, slotnum);
718 if (instr_breakpoint == IA64_BREAKPOINT)
719 internal_error (_("Address %s already contains a breakpoint."),
720 paddress (gdbarch, bp_tgt->placed_address));
721 replace_slotN_contents (bundle, IA64_BREAKPOINT, slotnum);
722
723 val = target_write_memory (addr + shadow_slotnum, bundle + shadow_slotnum,
724 bp_tgt->shadow_len);
725
726 return val;
727 }
728
729 static int
730 ia64_memory_remove_breakpoint (struct gdbarch *gdbarch,
731 struct bp_target_info *bp_tgt)
732 {
733 CORE_ADDR addr = bp_tgt->placed_address;
734 gdb_byte bundle_mem[BUNDLE_LEN], bundle_saved[BUNDLE_LEN];
735 int slotnum = (addr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
736 long long instr_breakpoint, instr_saved;
737 int val;
738 int templ;
739
740 addr &= ~0x0f;
741
742 /* Disable the automatic memory restoration from breakpoints while
743 we read our instruction bundle. Otherwise, the general restoration
744 mechanism kicks in and we would possibly remove parts of the adjacent
745 placed breakpoints. It is due to our SHADOW_CONTENTS overlapping the real
746 breakpoint instruction bits region. */
747 scoped_restore restore_memory_1
748 = make_scoped_restore_show_memory_breakpoints (1);
749 val = target_read_memory (addr, bundle_mem, BUNDLE_LEN);
750 if (val != 0)
751 return val;
752
753 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
754 for addressing the SHADOW_CONTENTS placement. */
755 shadow_slotnum = slotnum;
756
757 templ = extract_bit_field (bundle_mem, 0, 5);
758 if (template_encoding_table[templ][slotnum] == X)
759 {
760 /* X unit types can only be used in slot 2, and are actually
761 part of a 2-slot L-X instruction. We refuse to insert
762 breakpoints at this address, so there should be no reason
763 for us attempting to remove one there, except if the program's
764 code somehow got modified in memory. */
765 gdb_assert (slotnum == 2);
766 warning (_("Cannot remove breakpoint at address %s from non-existing "
767 "X-type slot, memory has changed underneath"),
768 paddress (gdbarch, bp_tgt->placed_address));
769 return -1;
770 }
771 if (template_encoding_table[templ][slotnum] == L)
772 {
773 /* L unit types can only be used in slot 1. But the breakpoint
774 was actually saved using slot 2, so update the slot number
775 accordingly. */
776 gdb_assert (slotnum == 1);
777 slotnum = 2;
778 }
779
780 gdb_assert (bp_tgt->shadow_len == BUNDLE_LEN - shadow_slotnum);
781
782 instr_breakpoint = slotN_contents (bundle_mem, slotnum);
783 if (instr_breakpoint != IA64_BREAKPOINT)
784 {
785 warning (_("Cannot remove breakpoint at address %s, "
786 "no break instruction at such address."),
787 paddress (gdbarch, bp_tgt->placed_address));
788 return -1;
789 }
790
791 /* Extract the original saved instruction from SLOTNUM normalizing its
792 bit-shift for INSTR_SAVED. */
793 memcpy (bundle_saved, bundle_mem, BUNDLE_LEN);
794 memcpy (bundle_saved + shadow_slotnum, bp_tgt->shadow_contents,
795 bp_tgt->shadow_len);
796 instr_saved = slotN_contents (bundle_saved, slotnum);
797
798 /* In BUNDLE_MEM, be careful to modify only the bits belonging to SLOTNUM
799 and not any of the other ones that are stored in SHADOW_CONTENTS. */
800 replace_slotN_contents (bundle_mem, instr_saved, slotnum);
801 val = target_write_raw_memory (addr, bundle_mem, BUNDLE_LEN);
802
803 return val;
804 }
805
806 /* Implement the breakpoint_kind_from_pc gdbarch method. */
807
808 static int
809 ia64_breakpoint_kind_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr)
810 {
811 /* A place holder of gdbarch method breakpoint_kind_from_pc. */
812 return 0;
813 }
814
815 /* As gdbarch_breakpoint_from_pc ranges have byte granularity and ia64
816 instruction slot ranges are bit-granular (41 bits), we have to provide an
817 extended range as described for ia64_memory_insert_breakpoint. We also take
818 care of preserving the `break' instruction 21-bit (or 62-bit) parameter to
819 make a match for permanent breakpoints. */
820
821 static const gdb_byte *
822 ia64_breakpoint_from_pc (struct gdbarch *gdbarch,
823 CORE_ADDR *pcptr, int *lenptr)
824 {
825 CORE_ADDR addr = *pcptr;
826 static gdb_byte bundle[BUNDLE_LEN];
827 int slotnum = (int) (*pcptr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
828 long long instr_fetched;
829 int val;
830 int templ;
831
832 if (slotnum > 2)
833 error (_("Can't insert breakpoint for slot numbers greater than 2."));
834
835 addr &= ~0x0f;
836
837 /* Enable the automatic memory restoration from breakpoints while
838 we read our instruction bundle to match bp_loc_is_permanent. */
839 {
840 scoped_restore restore_memory_0
841 = make_scoped_restore_show_memory_breakpoints (0);
842 val = target_read_memory (addr, bundle, BUNDLE_LEN);
843 }
844
845 /* The memory might be unreachable. This can happen, for instance,
846 when the user inserts a breakpoint at an invalid address. */
847 if (val != 0)
848 return NULL;
849
850 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
851 for addressing the SHADOW_CONTENTS placement. */
852 shadow_slotnum = slotnum;
853
854 /* Always cover the last byte of the bundle for the L-X slot case. */
855 *lenptr = BUNDLE_LEN - shadow_slotnum;
856
857 /* Check for L type instruction in slot 1, if present then bump up the slot
858 number to the slot 2. */
859 templ = extract_bit_field (bundle, 0, 5);
860 if (template_encoding_table[templ][slotnum] == X)
861 {
862 gdb_assert (slotnum == 2);
863 error (_("Can't insert breakpoint for non-existing slot X"));
864 }
865 if (template_encoding_table[templ][slotnum] == L)
866 {
867 gdb_assert (slotnum == 1);
868 slotnum = 2;
869 }
870
871 /* A break instruction has all of its opcode bits cleared except for
872 the parameter value. For L+X slot pair we are at the X slot (slot 2) so
873 we should not touch the L slot - the upper 41 bits of the parameter. */
874 instr_fetched = slotN_contents (bundle, slotnum);
875 instr_fetched &= 0x1003ffffc0LL;
876 replace_slotN_contents (bundle, instr_fetched, slotnum);
877
878 return bundle + shadow_slotnum;
879 }
880
881 static CORE_ADDR
882 ia64_read_pc (readable_regcache *regcache)
883 {
884 ULONGEST psr_value, pc_value;
885 int slot_num;
886
887 regcache->cooked_read (IA64_PSR_REGNUM, &psr_value);
888 regcache->cooked_read (IA64_IP_REGNUM, &pc_value);
889 slot_num = (psr_value >> 41) & 3;
890
891 return pc_value | (slot_num * SLOT_MULTIPLIER);
892 }
893
894 void
895 ia64_write_pc (struct regcache *regcache, CORE_ADDR new_pc)
896 {
897 int slot_num = (int) (new_pc & 0xf) / SLOT_MULTIPLIER;
898 ULONGEST psr_value;
899
900 regcache_cooked_read_unsigned (regcache, IA64_PSR_REGNUM, &psr_value);
901 psr_value &= ~(3LL << 41);
902 psr_value |= (ULONGEST)(slot_num & 0x3) << 41;
903
904 new_pc &= ~0xfLL;
905
906 regcache_cooked_write_unsigned (regcache, IA64_PSR_REGNUM, psr_value);
907 regcache_cooked_write_unsigned (regcache, IA64_IP_REGNUM, new_pc);
908 }
909
910 #define IS_NaT_COLLECTION_ADDR(addr) ((((addr) >> 3) & 0x3f) == 0x3f)
911
912 /* Returns the address of the slot that's NSLOTS slots away from
913 the address ADDR. NSLOTS may be positive or negative. */
914 static CORE_ADDR
915 rse_address_add(CORE_ADDR addr, int nslots)
916 {
917 CORE_ADDR new_addr;
918 int mandatory_nat_slots = nslots / 63;
919 int direction = nslots < 0 ? -1 : 1;
920
921 new_addr = addr + 8 * (nslots + mandatory_nat_slots);
922
923 if ((new_addr >> 9) != ((addr + 8 * 64 * mandatory_nat_slots) >> 9))
924 new_addr += 8 * direction;
925
926 if (IS_NaT_COLLECTION_ADDR(new_addr))
927 new_addr += 8 * direction;
928
929 return new_addr;
930 }
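/* Worked example (hypothetical address, illustrative only): a backing
   store word whose bits 3..8 are all ones is a NaT collection slot, so
   advancing one slot from just below such a word skips it:

     rse_address_add (0x9ffffffffffff1f0, 1) == 0x9ffffffffffff200

   because 0x...1f8 satisfies IS_NaT_COLLECTION_ADDR and the extra eight
   bytes step over the collection word.  */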
931
932 static enum register_status
933 ia64_pseudo_register_read (struct gdbarch *gdbarch, readable_regcache *regcache,
934 int regnum, gdb_byte *buf)
935 {
936 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
937 enum register_status status;
938
939 if (regnum >= V32_REGNUM && regnum <= V127_REGNUM)
940 {
941 #ifdef HAVE_LIBUNWIND_IA64_H
942 /* First try and use the libunwind special reg accessor,
943 otherwise fallback to standard logic. */
944 if (!libunwind_is_initialized ()
945 || libunwind_get_reg_special (gdbarch, regcache, regnum, buf) != 0)
946 #endif
947 {
948 /* The fallback position is to assume that r32-r127 are
949 found sequentially in memory starting at $bof. This
950 isn't always true, but without libunwind, this is the
951 best we can do. */
952 ULONGEST cfm;
953 ULONGEST bsp;
954 CORE_ADDR reg;
955
956 status = regcache->cooked_read (IA64_BSP_REGNUM, &bsp);
957 if (status != REG_VALID)
958 return status;
959
960 status = regcache->cooked_read (IA64_CFM_REGNUM, &cfm);
961 if (status != REG_VALID)
962 return status;
963
964 /* The bsp points at the end of the register frame so we
965 subtract the size of frame from it to get start of
966 register frame. */
967 bsp = rse_address_add (bsp, -(cfm & 0x7f));
968
969 if ((cfm & 0x7f) > regnum - V32_REGNUM)
970 {
971 ULONGEST reg_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
972 reg = read_memory_integer ((CORE_ADDR)reg_addr, 8, byte_order);
973 store_unsigned_integer (buf, register_size (gdbarch, regnum),
974 byte_order, reg);
975 }
976 else
977 store_unsigned_integer (buf, register_size (gdbarch, regnum),
978 byte_order, 0);
979 }
980 }
981 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
982 {
983 ULONGEST unatN_val;
984 ULONGEST unat;
985
986 status = regcache->cooked_read (IA64_UNAT_REGNUM, &unat);
987 if (status != REG_VALID)
988 return status;
989 unatN_val = (unat & (1LL << (regnum - IA64_NAT0_REGNUM))) != 0;
990 store_unsigned_integer (buf, register_size (gdbarch, regnum),
991 byte_order, unatN_val);
992 }
993 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
994 {
995 ULONGEST natN_val = 0;
996 ULONGEST bsp;
997 ULONGEST cfm;
998 CORE_ADDR gr_addr = 0;
999
1000 status = regcache->cooked_read (IA64_BSP_REGNUM, &bsp);
1001 if (status != REG_VALID)
1002 return status;
1003
1004 status = regcache->cooked_read (IA64_CFM_REGNUM, &cfm);
1005 if (status != REG_VALID)
1006 return status;
1007
1008 /* The bsp points at the end of the register frame so we
1009 subtract the size of frame from it to get start of register frame. */
1010 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1011
1012 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1013 gr_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1014
1015 if (gr_addr != 0)
1016 {
1017 /* Compute address of nat collection bits. */
1018 CORE_ADDR nat_addr = gr_addr | 0x1f8;
1019 ULONGEST nat_collection;
1020 int nat_bit;
1021 /* If our nat collection address is bigger than bsp, we have to get
1022 the nat collection from rnat. Otherwise, we fetch the nat
1023 collection from the computed address. */
1024 if (nat_addr >= bsp)
1025 regcache->cooked_read (IA64_RNAT_REGNUM, &nat_collection);
1026 else
1027 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
1028 nat_bit = (gr_addr >> 3) & 0x3f;
1029 natN_val = (nat_collection >> nat_bit) & 1;
1030 }
1031
1032 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1033 byte_order, natN_val);
1034 }
1035 else if (regnum == VBOF_REGNUM)
1036 {
1037 /* A virtual register frame start is provided for user convenience.
1038 It can be calculated as the bsp - sof (sizeof frame). */
1039 ULONGEST bsp, vbsp;
1040 ULONGEST cfm;
1041
1042 status = regcache->cooked_read (IA64_BSP_REGNUM, &bsp);
1043 if (status != REG_VALID)
1044 return status;
1045 status = regcache->cooked_read (IA64_CFM_REGNUM, &cfm);
1046 if (status != REG_VALID)
1047 return status;
1048
1049 /* The bsp points at the end of the register frame so we
1050 subtract the size of frame from it to get beginning of frame. */
1051 vbsp = rse_address_add (bsp, -(cfm & 0x7f));
1052 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1053 byte_order, vbsp);
1054 }
1055 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1056 {
1057 ULONGEST pr;
1058 ULONGEST cfm;
1059 ULONGEST prN_val;
1060
1061 status = regcache->cooked_read (IA64_PR_REGNUM, &pr);
1062 if (status != REG_VALID)
1063 return status;
1064 status = regcache->cooked_read (IA64_CFM_REGNUM, &cfm);
1065 if (status != REG_VALID)
1066 return status;
1067
1068 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1069 {
1070 /* Fetch predicate register rename base from current frame
1071 marker for this frame. */
1072 int rrb_pr = (cfm >> 32) & 0x3f;
1073
1074 /* Adjust the register number to account for register rotation. */
1075 regnum = VP16_REGNUM
1076 + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1077 }
1078 prN_val = (pr & (1LL << (regnum - VP0_REGNUM))) != 0;
1079 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1080 byte_order, prN_val);
1081 }
1082 else
1083 memset (buf, 0, register_size (gdbarch, regnum));
1084
1085 return REG_VALID;
1086 }
1087
1088 static void
1089 ia64_pseudo_register_write (struct gdbarch *gdbarch, struct regcache *regcache,
1090 int regnum, const gdb_byte *buf)
1091 {
1092 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1093
1094 if (regnum >= V32_REGNUM && regnum <= V127_REGNUM)
1095 {
1096 ULONGEST bsp;
1097 ULONGEST cfm;
1098 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
1099 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1100
1101 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1102
1103 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1104 {
1105 ULONGEST reg_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1106 write_memory (reg_addr, buf, 8);
1107 }
1108 }
1109 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
1110 {
1111 ULONGEST unatN_val, unat, unatN_mask;
1112 regcache_cooked_read_unsigned (regcache, IA64_UNAT_REGNUM, &unat);
1113 unatN_val = extract_unsigned_integer (buf, register_size (gdbarch,
1114 regnum),
1115 byte_order);
1116 unatN_mask = (1LL << (regnum - IA64_NAT0_REGNUM));
1117 if (unatN_val == 0)
1118 unat &= ~unatN_mask;
1119 else if (unatN_val == 1)
1120 unat |= unatN_mask;
1121 regcache_cooked_write_unsigned (regcache, IA64_UNAT_REGNUM, unat);
1122 }
1123 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
1124 {
1125 ULONGEST natN_val;
1126 ULONGEST bsp;
1127 ULONGEST cfm;
1128 CORE_ADDR gr_addr = 0;
1129 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
1130 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1131
1132 /* The bsp points at the end of the register frame so we
1133 subtract the size of frame from it to get start of register frame. */
1134 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1135
1136 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1137 gr_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1138
1139 natN_val = extract_unsigned_integer (buf, register_size (gdbarch,
1140 regnum),
1141 byte_order);
1142
1143 if (gr_addr != 0 && (natN_val == 0 || natN_val == 1))
1144 {
1145 /* Compute address of nat collection bits. */
1146 CORE_ADDR nat_addr = gr_addr | 0x1f8;
1147 CORE_ADDR nat_collection;
1148 int natN_bit = (gr_addr >> 3) & 0x3f;
1149 ULONGEST natN_mask = (1LL << natN_bit);
1150 /* If our nat collection address is bigger than bsp, we have to get
1151 the nat collection from rnat. Otherwise, we fetch the nat
1152 collection from the computed address. */
1153 if (nat_addr >= bsp)
1154 {
1155 regcache_cooked_read_unsigned (regcache,
1156 IA64_RNAT_REGNUM,
1157 &nat_collection);
1158 if (natN_val)
1159 nat_collection |= natN_mask;
1160 else
1161 nat_collection &= ~natN_mask;
1162 regcache_cooked_write_unsigned (regcache, IA64_RNAT_REGNUM,
1163 nat_collection);
1164 }
1165 else
1166 {
1167 gdb_byte nat_buf[8];
1168 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
1169 if (natN_val)
1170 nat_collection |= natN_mask;
1171 else
1172 nat_collection &= ~natN_mask;
1173 store_unsigned_integer (nat_buf, register_size (gdbarch, regnum),
1174 byte_order, nat_collection);
1175 write_memory (nat_addr, nat_buf, 8);
1176 }
1177 }
1178 }
1179 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1180 {
1181 ULONGEST pr;
1182 ULONGEST cfm;
1183 ULONGEST prN_val;
1184 ULONGEST prN_mask;
1185
1186 regcache_cooked_read_unsigned (regcache, IA64_PR_REGNUM, &pr);
1187 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1188
1189 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1190 {
1191 /* Fetch predicate register rename base from current frame
1192 marker for this frame. */
1193 int rrb_pr = (cfm >> 32) & 0x3f;
1194
1195 /* Adjust the register number to account for register rotation. */
1196 regnum = VP16_REGNUM
1197 + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1198 }
1199 prN_val = extract_unsigned_integer (buf, register_size (gdbarch, regnum),
1200 byte_order);
1201 prN_mask = (1LL << (regnum - VP0_REGNUM));
1202 if (prN_val == 0)
1203 pr &= ~prN_mask;
1204 else if (prN_val == 1)
1205 pr |= prN_mask;
1206 regcache_cooked_write_unsigned (regcache, IA64_PR_REGNUM, pr);
1207 }
1208 }
1209
1210 /* The ia64 needs to convert between various IEEE floating-point formats
1211 and the special ia64 floating-point register format. */
1212
1213 static int
1214 ia64_convert_register_p (struct gdbarch *gdbarch, int regno, struct type *type)
1215 {
1216 return (regno >= IA64_FR0_REGNUM && regno <= IA64_FR127_REGNUM
1217 && type->code () == TYPE_CODE_FLT
1218 && type != ia64_ext_type (gdbarch));
1219 }
1220
1221 static int
1222 ia64_register_to_value (frame_info_ptr frame, int regnum,
1223 struct type *valtype, gdb_byte *out,
1224 int *optimizedp, int *unavailablep)
1225 {
1226 struct gdbarch *gdbarch = get_frame_arch (frame);
1227 gdb_byte in[IA64_FP_REGISTER_SIZE];
1228
1229 /* Convert to TYPE. */
1230 auto in_view = gdb::make_array_view (in, register_size (gdbarch, regnum));
1231 frame_info_ptr next_frame = get_next_frame_sentinel_okay (frame);
1232 if (!get_frame_register_bytes (next_frame, regnum, 0, in_view, optimizedp,
1233 unavailablep))
1234 return 0;
1235
1236 target_float_convert (in, ia64_ext_type (gdbarch), out, valtype);
1237 *optimizedp = *unavailablep = 0;
1238 return 1;
1239 }
1240
1241 static void
1242 ia64_value_to_register (frame_info_ptr frame, int regnum,
1243 struct type *valtype, const gdb_byte *in)
1244 {
1245 struct gdbarch *gdbarch = get_frame_arch (frame);
1246 gdb_byte out[IA64_FP_REGISTER_SIZE];
1247 type *to_type = ia64_ext_type (gdbarch);
1248 target_float_convert (in, valtype, out, to_type);
1249 auto out_view = gdb::make_array_view (out, to_type->length ());
1250 put_frame_register (get_next_frame_sentinel_okay (frame), regnum, out_view);
1251 }
1252
1253
1254 /* Limit the number of skipped non-prologue instructions, since examining
1255 the prologue is expensive. */
1256 static int max_skip_non_prologue_insns = 40;
1257
1258 /* Given PC representing the starting address of a function, and
1259 LIM_PC which is the (sloppy) limit to which to scan when looking
1260 for a prologue, attempt to further refine this limit by using
1261 the line data in the symbol table. If successful, a better guess
1262 on where the prologue ends is returned, otherwise the previous
1263 value of lim_pc is returned. TRUST_LIMIT is a pointer to a flag
1264 which will be set to indicate whether the returned limit may be
1265 used with no further scanning in the event that the function is
1266 frameless. */
1267
1268 /* FIXME: cagney/2004-02-14: This function and logic have largely been
1269 superseded by skip_prologue_using_sal. */
1270
1271 static CORE_ADDR
1272 refine_prologue_limit (CORE_ADDR pc, CORE_ADDR lim_pc, int *trust_limit)
1273 {
1274 struct symtab_and_line prologue_sal;
1275 CORE_ADDR start_pc = pc;
1276 CORE_ADDR end_pc;
1277
1278 /* The prologue can not possibly go past the function end itself,
1279 so we can already adjust LIM_PC accordingly. */
1280 if (find_pc_partial_function (pc, NULL, NULL, &end_pc) && end_pc < lim_pc)
1281 lim_pc = end_pc;
1282
1283 /* Start off not trusting the limit. */
1284 *trust_limit = 0;
1285
1286 prologue_sal = find_pc_line (pc, 0);
1287 if (prologue_sal.line != 0)
1288 {
1289 int i;
1290 CORE_ADDR addr = prologue_sal.end;
1291
1292 /* Handle the case in which compiler's optimizer/scheduler
1293 has moved instructions into the prologue. We scan ahead
1294 in the function looking for address ranges whose corresponding
1295 line number is less than or equal to the first one that we
1296 found for the function. (It can be less than when the
1297 scheduler puts a body instruction before the first prologue
1298 instruction.) */
1299 for (i = 2 * max_skip_non_prologue_insns;
1300 i > 0 && (lim_pc == 0 || addr < lim_pc);
1301 i--)
1302 {
1303 struct symtab_and_line sal;
1304
1305 sal = find_pc_line (addr, 0);
1306 if (sal.line == 0)
1307 break;
1308 if (sal.line <= prologue_sal.line
1309 && sal.symtab == prologue_sal.symtab)
1310 {
1311 prologue_sal = sal;
1312 }
1313 addr = sal.end;
1314 }
1315
1316 if (lim_pc == 0 || prologue_sal.end < lim_pc)
1317 {
1318 lim_pc = prologue_sal.end;
1319 if (start_pc == get_pc_function_start (lim_pc))
1320 *trust_limit = 1;
1321 }
1322 }
1323 return lim_pc;
1324 }
1325
1326 #define isScratch(_regnum_) ((_regnum_) == 2 || (_regnum_) == 3 \
1327 || (8 <= (_regnum_) && (_regnum_) <= 11) \
1328 || (14 <= (_regnum_) && (_regnum_) <= 31))
1329 #define imm9(_instr_) \
1330 ( ((((_instr_) & 0x01000000000LL) ? -1 : 0) << 8) \
1331 | (((_instr_) & 0x00008000000LL) >> 20) \
1332 | (((_instr_) & 0x00000001fc0LL) >> 6))
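/* Illustrative note on imm9 above: it reassembles the signed 9-bit
   immediate of the store/spill forms from bit 36 (the sign, replicated
   upward), bit 27 (immediate bit 7) and bits 12..6 (immediate bits 6..0)
   of the 41-bit instruction.  For instance, an instruction value with
   only bit 36 set yields the most negative immediate:

     imm9 (0x01000000000LL) == -256  */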
1333
1334 /* Allocate and initialize a frame cache. */
1335
1336 static struct ia64_frame_cache *
1337 ia64_alloc_frame_cache (void)
1338 {
1339 struct ia64_frame_cache *cache;
1340 int i;
1341
1342 cache = FRAME_OBSTACK_ZALLOC (struct ia64_frame_cache);
1343
1344 /* Base address. */
1345 cache->base = 0;
1346 cache->pc = 0;
1347 cache->cfm = 0;
1348 cache->prev_cfm = 0;
1349 cache->sof = 0;
1350 cache->sol = 0;
1351 cache->sor = 0;
1352 cache->bsp = 0;
1353 cache->fp_reg = 0;
1354 cache->frameless = 1;
1355
1356 for (i = 0; i < NUM_IA64_RAW_REGS; i++)
1357 cache->saved_regs[i] = 0;
1358
1359 return cache;
1360 }
1361
1362 static CORE_ADDR
1363 examine_prologue (CORE_ADDR pc, CORE_ADDR lim_pc,
1364 frame_info_ptr this_frame,
1365 struct ia64_frame_cache *cache)
1366 {
1367 CORE_ADDR next_pc;
1368 CORE_ADDR last_prologue_pc = pc;
1369 ia64_instruction_type it;
1370 long long instr;
1371 int cfm_reg = 0;
1372 int ret_reg = 0;
1373 int fp_reg = 0;
1374 int unat_save_reg = 0;
1375 int pr_save_reg = 0;
1376 int mem_stack_frame_size = 0;
1377 int spill_reg = 0;
1378 CORE_ADDR spill_addr = 0;
1379 char instores[8];
1380 char infpstores[8];
1381 char reg_contents[256];
1382 int trust_limit;
1383 int frameless = 1;
1384 int i;
1385 CORE_ADDR addr;
1386 gdb_byte buf[8];
1387 CORE_ADDR bof, sor, sol, sof, cfm, rrb_gr;
1388
1389 memset (instores, 0, sizeof instores);
1390 memset (infpstores, 0, sizeof infpstores);
1391 memset (reg_contents, 0, sizeof reg_contents);
1392
1393 if (cache->after_prologue != 0
1394 && cache->after_prologue <= lim_pc)
1395 return cache->after_prologue;
1396
1397 lim_pc = refine_prologue_limit (pc, lim_pc, &trust_limit);
1398 next_pc = fetch_instruction (pc, &it, &instr);
1399
1400 /* We want to check if we have a recognizable function start before we
1401 look ahead for a prologue. */
1402 if (pc < lim_pc && next_pc
1403 && it == M && ((instr & 0x1ee0000003fLL) == 0x02c00000000LL))
1404 {
1405 /* alloc - start of a regular function. */
1406 int sol_bits = (int) ((instr & 0x00007f00000LL) >> 20);
1407 int sof_bits = (int) ((instr & 0x000000fe000LL) >> 13);
1408 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1409
1410 /* Verify that the current cfm matches what we think is the
1411 function start. If we have somehow jumped within a function,
1412 we do not want to interpret the prologue and calculate the
1413 addresses of various registers such as the return address.
1414 We will instead treat the frame as frameless. */
1415 if (!this_frame ||
1416 (sof_bits == (cache->cfm & 0x7f) &&
1417 sol_bits == ((cache->cfm >> 7) & 0x7f)))
1418 frameless = 0;
1419
1420 cfm_reg = rN;
1421 last_prologue_pc = next_pc;
1422 pc = next_pc;
1423 }
1424 else
1425 {
1426 /* Look for a leaf routine. */
1427 if (pc < lim_pc && next_pc
1428 && (it == I || it == M)
1429 && ((instr & 0x1ee00000000LL) == 0x10800000000LL))
1430 {
1431 /* adds rN = imm14, rM (or mov rN, rM when imm14 is 0) */
1432 int imm = (int) ((((instr & 0x01000000000LL) ? -1 : 0) << 13)
1433 | ((instr & 0x001f8000000LL) >> 20)
1434 | ((instr & 0x000000fe000LL) >> 13));
1435 int rM = (int) ((instr & 0x00007f00000LL) >> 20);
1436 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1437 int qp = (int) (instr & 0x0000000003fLL);
1438 if (qp == 0 && rN == 2 && imm == 0 && rM == 12 && fp_reg == 0)
1439 {
1440 /* mov r2, r12 - beginning of leaf routine. */
1441 fp_reg = rN;
1442 last_prologue_pc = next_pc;
1443 }
1444 }
1445
1446 /* If we don't recognize a regular function or leaf routine, we are
1447 done. */
1448 if (!fp_reg)
1449 {
1450 pc = lim_pc;
1451 if (trust_limit)
1452 last_prologue_pc = lim_pc;
1453 }
1454 }
1455
1456 /* Loop, looking for prologue instructions, keeping track of
1457 where preserved registers were spilled. */
1458 while (pc < lim_pc)
1459 {
1460 next_pc = fetch_instruction (pc, &it, &instr);
1461 if (next_pc == 0)
1462 break;
1463
1464 if (it == B && ((instr & 0x1e1f800003fLL) != 0x04000000000LL))
1465 {
1466 /* Exit loop upon hitting a non-nop branch instruction. */
1467 if (trust_limit)
1468 lim_pc = pc;
1469 break;
1470 }
1471 else if (((instr & 0x3fLL) != 0LL) &&
1472 (frameless || ret_reg != 0))
1473 {
1474 /* Exit loop upon hitting a predicated instruction if
1475 we already have the return register or if we are frameless. */
1476 if (trust_limit)
1477 lim_pc = pc;
1478 break;
1479 }
1480 else if (it == I && ((instr & 0x1eff8000000LL) == 0x00188000000LL))
1481 {
1482 /* Move from BR */
1483 int b2 = (int) ((instr & 0x0000000e000LL) >> 13);
1484 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1485 int qp = (int) (instr & 0x0000000003f);
1486
1487 if (qp == 0 && b2 == 0 && rN >= 32 && ret_reg == 0)
1488 {
1489 ret_reg = rN;
1490 last_prologue_pc = next_pc;
1491 }
1492 }
1493 else if ((it == I || it == M)
1494 && ((instr & 0x1ee00000000LL) == 0x10800000000LL))
1495 {
1496 /* adds rN = imm14, rM (or mov rN, rM when imm14 is 0) */
1497 int imm = (int) ((((instr & 0x01000000000LL) ? -1 : 0) << 13)
1498 | ((instr & 0x001f8000000LL) >> 20)
1499 | ((instr & 0x000000fe000LL) >> 13));
1500 int rM = (int) ((instr & 0x00007f00000LL) >> 20);
1501 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1502 int qp = (int) (instr & 0x0000000003fLL);
1503
1504 if (qp == 0 && rN >= 32 && imm == 0 && rM == 12 && fp_reg == 0)
1505 {
1506 /* mov rN, r12 */
1507 fp_reg = rN;
1508 last_prologue_pc = next_pc;
1509 }
1510 else if (qp == 0 && rN == 12 && rM == 12)
1511 {
1512 /* adds r12, -mem_stack_frame_size, r12 */
1513 mem_stack_frame_size -= imm;
1514 last_prologue_pc = next_pc;
1515 }
1516 else if (qp == 0 && rN == 2
1517 && ((rM == fp_reg && fp_reg != 0) || rM == 12))
1518 {
1519 CORE_ADDR saved_sp = 0;
1520 /* adds r2, spilloffset, rFramePointer
1521 or
1522 adds r2, spilloffset, r12
1523
1524 Get ready for stf.spill or st8.spill instructions.
1525 The address to start spilling at is loaded into r2.
1526 FIXME: Why r2? That's what gcc currently uses; it
1527 could well be different for other compilers. */
1528
1529 /* Hmm... whether or not this will work will depend on
1530 where the pc is. If it's still early in the prologue
1531 this'll be wrong. FIXME */
1532 if (this_frame)
1533 saved_sp = get_frame_register_unsigned (this_frame,
1534 sp_regnum);
1535 spill_addr = saved_sp
1536 + (rM == 12 ? 0 : mem_stack_frame_size)
1537 + imm;
1538 spill_reg = rN;
1539 last_prologue_pc = next_pc;
1540 }
1541 else if (qp == 0 && rM >= 32 && rM < 40 && !instores[rM-32] &&
1542 rN < 256 && imm == 0)
1543 {
1544 /* mov rN, rM where rM is an input register. */
1545 reg_contents[rN] = rM;
1546 last_prologue_pc = next_pc;
1547 }
1548 else if (frameless && qp == 0 && rN == fp_reg && imm == 0 &&
1549 rM == 2)
1550 {
1551 /* mov r12, r2 */
1552 last_prologue_pc = next_pc;
1553 break;
1554 }
1555 }
1556 else if (it == M
1557 && ( ((instr & 0x1efc0000000LL) == 0x0eec0000000LL)
1558 || ((instr & 0x1ffc8000000LL) == 0x0cec0000000LL) ))
1559 {
1560 /* stf.spill [rN] = fM, imm9
1561 or
1562 stf.spill [rN] = fM */
1563
1564 int imm = imm9(instr);
1565 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1566 int fM = (int) ((instr & 0x000000fe000LL) >> 13);
1567 int qp = (int) (instr & 0x0000000003fLL);
1568 if (qp == 0 && rN == spill_reg && spill_addr != 0
1569 && ((2 <= fM && fM <= 5) || (16 <= fM && fM <= 31)))
1570 {
1571 cache->saved_regs[IA64_FR0_REGNUM + fM] = spill_addr;
1572
1573 if ((instr & 0x1efc0000000LL) == 0x0eec0000000LL)
1574 spill_addr += imm;
1575 else
1576 spill_addr = 0; /* last one; must be done. */
1577 last_prologue_pc = next_pc;
1578 }
1579 }
1580 else if ((it == M && ((instr & 0x1eff8000000LL) == 0x02110000000LL))
1581 || (it == I && ((instr & 0x1eff8000000LL) == 0x00050000000LL)) )
1582 {
1583 /* mov.m rN = arM
1584 or
1585 mov.i rN = arM */
1586
1587 int arM = (int) ((instr & 0x00007f00000LL) >> 20);
1588 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1589 int qp = (int) (instr & 0x0000000003fLL);
1590 if (qp == 0 && isScratch (rN) && arM == 36 /* ar.unat */)
1591 {
1592 /* We have something like "mov.m r3 = ar.unat". Remember the
1593 r3 (or whatever) and watch for a store of this register... */
1594 unat_save_reg = rN;
1595 last_prologue_pc = next_pc;
1596 }
1597 }
1598 else if (it == I && ((instr & 0x1eff8000000LL) == 0x00198000000LL))
1599 {
1600 /* mov rN = pr */
1601 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1602 int qp = (int) (instr & 0x0000000003fLL);
1603 if (qp == 0 && isScratch (rN))
1604 {
1605 pr_save_reg = rN;
1606 last_prologue_pc = next_pc;
1607 }
1608 }
1609 else if (it == M
1610 && ( ((instr & 0x1ffc8000000LL) == 0x08cc0000000LL)
1611 || ((instr & 0x1efc0000000LL) == 0x0acc0000000LL)))
1612 {
1613 /* st8 [rN] = rM
1614 or
1615 st8 [rN] = rM, imm9 */
1616 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1617 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1618 int qp = (int) (instr & 0x0000000003fLL);
1619 int indirect = rM < 256 ? reg_contents[rM] : 0;
1620 if (qp == 0 && rN == spill_reg && spill_addr != 0
1621 && (rM == unat_save_reg || rM == pr_save_reg))
1622 {
1623 /* We've found a spill of either the UNAT register or the PR
1624 register. (Well, not exactly; what we've actually found is
1625 a spill of the register that UNAT or PR was moved to).
1626 Record that fact and move on... */
1627 if (rM == unat_save_reg)
1628 {
1629 /* Track UNAT register. */
1630 cache->saved_regs[IA64_UNAT_REGNUM] = spill_addr;
1631 unat_save_reg = 0;
1632 }
1633 else
1634 {
1635 /* Track PR register. */
1636 cache->saved_regs[IA64_PR_REGNUM] = spill_addr;
1637 pr_save_reg = 0;
1638 }
1639 if ((instr & 0x1efc0000000LL) == 0x0acc0000000LL)
1640 /* st8 [rN] = rM, imm9 */
1641 spill_addr += imm9(instr);
1642 else
1643 spill_addr = 0; /* Must be done spilling. */
1644 last_prologue_pc = next_pc;
1645 }
1646 else if (qp == 0 && 32 <= rM && rM < 40 && !instores[rM-32])
1647 {
1648 /* Allow up to one store of each input register. */
1649 instores[rM-32] = 1;
1650 last_prologue_pc = next_pc;
1651 }
1652 else if (qp == 0 && 32 <= indirect && indirect < 40
1653 && !instores[indirect-32])
1654 {
1655 /* Allow an indirect store of an input register. */
1656 instores[indirect-32] = 1;
1657 last_prologue_pc = next_pc;
1658 }
1659 }
1660 else if (it == M && ((instr & 0x1ff08000000LL) == 0x08c00000000LL))
1661 {
1662 /* One of
1663 st1 [rN] = rM
1664 st2 [rN] = rM
1665 st4 [rN] = rM
1666 st8 [rN] = rM
1667 Note that the st8 case is handled in the clause above.
1668
1669 Advance over stores of input registers. One store per input
1670 register is permitted. */
1671 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1672 int qp = (int) (instr & 0x0000000003fLL);
1673 int indirect = rM < 256 ? reg_contents[rM] : 0;
1674 if (qp == 0 && 32 <= rM && rM < 40 && !instores[rM-32])
1675 {
1676 instores[rM-32] = 1;
1677 last_prologue_pc = next_pc;
1678 }
1679 else if (qp == 0 && 32 <= indirect && indirect < 40
1680 && !instores[indirect-32])
1681 {
1682 /* Allow an indirect store of an input register. */
1683 instores[indirect-32] = 1;
1684 last_prologue_pc = next_pc;
1685 }
1686 }
1687 else if (it == M && ((instr & 0x1ff88000000LL) == 0x0cc80000000LL))
1688 {
1689 /* Either
1690 stfs [rN] = fM
1691 or
1692 stfd [rN] = fM
1693
1694 Advance over stores of floating point input registers. Again
1695 one store per register is permitted. */
1696 int fM = (int) ((instr & 0x000000fe000LL) >> 13);
1697 int qp = (int) (instr & 0x0000000003fLL);
1698 if (qp == 0 && 8 <= fM && fM < 16 && !infpstores[fM - 8])
1699 {
1700 infpstores[fM-8] = 1;
1701 last_prologue_pc = next_pc;
1702 }
1703 }
1704 else if (it == M
1705 && ( ((instr & 0x1ffc8000000LL) == 0x08ec0000000LL)
1706 || ((instr & 0x1efc0000000LL) == 0x0aec0000000LL)))
1707 {
1708 /* st8.spill [rN] = rM
1709 or
1710 st8.spill [rN] = rM, imm9 */
1711 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1712 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1713 int qp = (int) (instr & 0x0000000003fLL);
1714 if (qp == 0 && rN == spill_reg && 4 <= rM && rM <= 7)
1715 {
1716 /* We've found a spill of one of the preserved general purpose
1717 regs. Record the spill address and advance the spill
1718 register if appropriate. */
1719 cache->saved_regs[IA64_GR0_REGNUM + rM] = spill_addr;
1720 if ((instr & 0x1efc0000000LL) == 0x0aec0000000LL)
1721 /* st8.spill [rN] = rM, imm9 */
1722 spill_addr += imm9(instr);
1723 else
1724 spill_addr = 0; /* Done spilling. */
1725 last_prologue_pc = next_pc;
1726 }
1727 }
1728
1729 pc = next_pc;
1730 }
1731
1732 /* If not frameless and we aren't called by skip_prologue, then we need
1733 to calculate registers for the previous frame which will be needed
1734 later. */
1735
1736 if (!frameless && this_frame)
1737 {
1738 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1739 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1740
1741 /* Extract the size of the rotating portion of the stack
1742 frame and the register rename base from the current
1743 frame marker. */
1744 cfm = cache->cfm;
1745 sor = cache->sor;
1746 sof = cache->sof;
1747 sol = cache->sol;
1748 rrb_gr = (cfm >> 18) & 0x7f;
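/* An illustration of the CFM decode used here and again below for the
   previous frame (example value only): cfm = 0x60e gives
   sof = 0x60e & 0x7f = 14, sol = (0x60e >> 7) & 0x7f = 12 and
   sor = ((0x60e >> 14) & 0xf) * 8 = 0, i.e. a register frame of 14
   stacked registers (r32-r45), 12 of them local, 2 output, with no
   rotating region and therefore rrb.gr = 0.  */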
1749
1750 /* Find the bof (beginning of frame). */
1751 bof = rse_address_add (cache->bsp, -sof);
1752
1753 for (i = 0, addr = bof;
1754 i < sof;
1755 i++, addr += 8)
1756 {
1757 if (IS_NaT_COLLECTION_ADDR (addr))
1758 {
1759 addr += 8;
1760 }
1761 if (i+32 == cfm_reg)
1762 cache->saved_regs[IA64_CFM_REGNUM] = addr;
1763 if (i+32 == ret_reg)
1764 cache->saved_regs[IA64_VRAP_REGNUM] = addr;
1765 if (i+32 == fp_reg)
1766 cache->saved_regs[IA64_VFP_REGNUM] = addr;
1767 }
1768
1769 /* For the previous argument registers we require the previous bof.
1770 If we can't find the previous cfm, then we can do nothing. */
1771 cfm = 0;
1772 if (cache->saved_regs[IA64_CFM_REGNUM] != 0)
1773 {
1774 cfm = read_memory_integer (cache->saved_regs[IA64_CFM_REGNUM],
1775 8, byte_order);
1776 }
1777 else if (cfm_reg != 0)
1778 {
1779 get_frame_register (this_frame, cfm_reg, buf);
1780 cfm = extract_unsigned_integer (buf, 8, byte_order);
1781 }
1782 cache->prev_cfm = cfm;
1783
1784 if (cfm != 0)
1785 {
1786 sor = ((cfm >> 14) & 0xf) * 8;
1787 sof = (cfm & 0x7f);
1788 sol = (cfm >> 7) & 0x7f;
1789 rrb_gr = (cfm >> 18) & 0x7f;
1790
1791 /* The previous bof only requires subtraction of the sol (size of
1792 locals) due to the overlap between output and input of
1793 subsequent frames. */
1794 bof = rse_address_add (bof, -sol);
1795
1796 for (i = 0, addr = bof;
1797 i < sof;
1798 i++, addr += 8)
1799 {
1800 if (IS_NaT_COLLECTION_ADDR (addr))
1801 {
1802 addr += 8;
1803 }
1804 if (i < sor)
1805 cache->saved_regs[IA64_GR32_REGNUM
1806 + ((i + (sor - rrb_gr)) % sor)]
1807 = addr;
1808 else
1809 cache->saved_regs[IA64_GR32_REGNUM + i] = addr;
1810 }
1811
1812 }
1813 }
1814
1815 /* Try to trust the lim_pc value whenever possible. */
1816 if (trust_limit && lim_pc >= last_prologue_pc)
1817 last_prologue_pc = lim_pc;
1818
1819 cache->frameless = frameless;
1820 cache->after_prologue = last_prologue_pc;
1821 cache->mem_stack_frame_size = mem_stack_frame_size;
1822 cache->fp_reg = fp_reg;
1823
1824 return last_prologue_pc;
1825 }
1826
1827 CORE_ADDR
1828 ia64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1829 {
1830 struct ia64_frame_cache cache;
1831 cache.base = 0;
1832 cache.after_prologue = 0;
1833 cache.cfm = 0;
1834 cache.bsp = 0;
1835
1836 /* Call examine_prologue with 0 as the third argument since we don't
1837 have a this_frame pointer to pass. */
1838 return examine_prologue (pc, pc+1024, 0, &cache);
1839 }
1840
1841
1842 /* Normal frames. */
1843
1844 static struct ia64_frame_cache *
1845 ia64_frame_cache (frame_info_ptr this_frame, void **this_cache)
1846 {
1847 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1848 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1849 struct ia64_frame_cache *cache;
1850 gdb_byte buf[8];
1851 CORE_ADDR cfm;
1852
1853 if (*this_cache)
1854 return (struct ia64_frame_cache *) *this_cache;
1855
1856 cache = ia64_alloc_frame_cache ();
1857 *this_cache = cache;
1858
1859 get_frame_register (this_frame, sp_regnum, buf);
1860 cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);
1861
1862 /* We always want the bsp to point to the end of frame.
1863 This way, we can always get the beginning of frame (bof)
1864 by subtracting frame size. */
1865 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
1866 cache->bsp = extract_unsigned_integer (buf, 8, byte_order);
1867
1868 get_frame_register (this_frame, IA64_PSR_REGNUM, buf);
1869
1870 get_frame_register (this_frame, IA64_CFM_REGNUM, buf);
1871 cfm = extract_unsigned_integer (buf, 8, byte_order);
1872
1873 cache->sof = (cfm & 0x7f);
1874 cache->sol = (cfm >> 7) & 0x7f;
1875 cache->sor = ((cfm >> 14) & 0xf) * 8;
1876
1877 cache->cfm = cfm;
1878
1879 cache->pc = get_frame_func (this_frame);
1880
1881 if (cache->pc != 0)
1882 examine_prologue (cache->pc, get_frame_pc (this_frame), this_frame, cache);
1883
1884 cache->base = cache->saved_sp + cache->mem_stack_frame_size;
1885
1886 return cache;
1887 }
1888
1889 static void
1890 ia64_frame_this_id (frame_info_ptr this_frame, void **this_cache,
1891 struct frame_id *this_id)
1892 {
1893 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1894 struct ia64_frame_cache *cache =
1895 ia64_frame_cache (this_frame, this_cache);
1896
1897 /* If outermost frame, mark with null frame id. */
1898 if (cache->base != 0)
1899 (*this_id) = frame_id_build_special (cache->base, cache->pc, cache->bsp);
1900 if (gdbarch_debug >= 1)
1901 gdb_printf (gdb_stdlog,
1902 "regular frame id: code %s, stack %s, "
1903 "special %s, this_frame %s\n",
1904 paddress (gdbarch, this_id->code_addr),
1905 paddress (gdbarch, this_id->stack_addr),
1906 paddress (gdbarch, cache->bsp),
1907 host_address_to_string (this_frame.get ()));
1908 }
1909
1910 static struct value *
1911 ia64_frame_prev_register (frame_info_ptr this_frame, void **this_cache,
1912 int regnum)
1913 {
1914 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1915 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1916 struct ia64_frame_cache *cache = ia64_frame_cache (this_frame, this_cache);
1917 gdb_byte buf[8];
1918
1919 gdb_assert (regnum >= 0);
1920
1921 if (!target_has_registers ())
1922 error (_("No registers."));
1923
1924 if (regnum == gdbarch_sp_regnum (gdbarch))
1925 return frame_unwind_got_constant (this_frame, regnum, cache->base);
1926
1927 else if (regnum == IA64_BSP_REGNUM)
1928 {
1929 struct value *val;
1930 CORE_ADDR prev_cfm, bsp, prev_bsp;
1931
1932 /* We want to calculate the previous bsp as the end of the previous
1933 register stack frame. This corresponds to what the hardware bsp
1934 register will be if we pop the frame back which is why we might
1935 have been called. We know the beginning of the current frame is
1936 cache->bsp - cache->sof. This value in the previous frame points
1937 to the start of the output registers. We can calculate the end of
1938 that frame by adding the size of output:
1939 (sof (size of frame) - sol (size of locals)). */
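/* Illustrative numbers, not taken from any particular target: with
   cache->sof = 5 the beginning of this frame sits 5 register slots
   below cache->bsp; if the previous CFM then decodes to sof = 10 and
   sol = 7, the previous bsp lies 10 - 7 = 3 slots above that bof.
   rse_address_add does the slot arithmetic, stepping over any NaT
   collection words in between.  */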
1940 val = ia64_frame_prev_register (this_frame, this_cache, IA64_CFM_REGNUM);
1941 prev_cfm = extract_unsigned_integer (val->contents_all ().data (),
1942 8, byte_order);
1943 bsp = rse_address_add (cache->bsp, -(cache->sof));
1944 prev_bsp =
1945 rse_address_add (bsp, (prev_cfm & 0x7f) - ((prev_cfm >> 7) & 0x7f));
1946
1947 return frame_unwind_got_constant (this_frame, regnum, prev_bsp);
1948 }
1949
1950 else if (regnum == IA64_CFM_REGNUM)
1951 {
1952 CORE_ADDR addr = cache->saved_regs[IA64_CFM_REGNUM];
1953
1954 if (addr != 0)
1955 return frame_unwind_got_memory (this_frame, regnum, addr);
1956
1957 if (cache->prev_cfm)
1958 return frame_unwind_got_constant (this_frame, regnum, cache->prev_cfm);
1959
1960 if (cache->frameless)
1961 return frame_unwind_got_register (this_frame, IA64_PFS_REGNUM,
1962 IA64_PFS_REGNUM);
1963 return frame_unwind_got_register (this_frame, regnum, 0);
1964 }
1965
1966 else if (regnum == IA64_VFP_REGNUM)
1967 {
1968 /* If the function in question uses an automatic register (r32-r127)
1969 for the frame pointer, it'll be found by ia64_find_saved_register()
1970 above. If the function lacks one of these frame pointers, we can
1971 still provide a value since we know the size of the frame. */
1972 return frame_unwind_got_constant (this_frame, regnum, cache->base);
1973 }
1974
1975 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1976 {
1977 struct value *pr_val;
1978 ULONGEST prN;
1979
1980 pr_val = ia64_frame_prev_register (this_frame, this_cache,
1981 IA64_PR_REGNUM);
1982 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1983 {
1984 /* Fetch predicate register rename base from current frame
1985 marker for this frame. */
1986 int rrb_pr = (cache->cfm >> 32) & 0x3f;
1987
1988 /* Adjust the register number to account for register rotation. */
1989 regnum = VP16_REGNUM + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1990 }
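/* For example (hypothetical rename base): with rrb_pr = 5, a request
   for p20 reads the bit corresponding to VP16_REGNUM + ((20 - 16) + 5)
   % 48 = VP16_REGNUM + 9, i.e. physical p25, out of the unwound pr
   value below.  */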
1991 prN = extract_bit_field (pr_val->contents_all ().data (),
1992 regnum - VP0_REGNUM, 1);
1993 return frame_unwind_got_constant (this_frame, regnum, prN);
1994 }
1995
1996 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
1997 {
1998 struct value *unat_val;
1999 ULONGEST unatN;
2000 unat_val = ia64_frame_prev_register (this_frame, this_cache,
2001 IA64_UNAT_REGNUM);
2002 unatN = extract_bit_field (unat_val->contents_all ().data (),
2003 regnum - IA64_NAT0_REGNUM, 1);
2004 return frame_unwind_got_constant (this_frame, regnum, unatN);
2005 }
2006
2007 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
2008 {
2009 int natval = 0;
2010 /* Find address of general register corresponding to nat bit we're
2011 interested in. */
2012 CORE_ADDR gr_addr;
2013
2014 gr_addr = cache->saved_regs[regnum - IA64_NAT0_REGNUM + IA64_GR0_REGNUM];
2015
2016 if (gr_addr != 0)
2017 {
2018 /* Compute address of nat collection bits. */
2019 CORE_ADDR nat_addr = gr_addr | 0x1f8;
2020 CORE_ADDR bsp;
2021 CORE_ADDR nat_collection;
2022 int nat_bit;
2023
2024 /* If our nat collection address is bigger than bsp, we have to get
2025 the nat collection from rnat. Otherwise, we fetch the nat
2026 collection from the computed address. */
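/* Concretely, for an example backing-store address ending in 0x9f40:
   nat_addr is the address ending in 0x9ff8 -- the single NaT
   collection doubleword that tops each 0x200-byte region of 63
   register slots -- and nat_bit below is (0x9f40 >> 3) & 0x3f = 40,
   the bit for this slot within that collection word.  */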
2027 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2028 bsp = extract_unsigned_integer (buf, 8, byte_order);
2029 if (nat_addr >= bsp)
2030 {
2031 get_frame_register (this_frame, IA64_RNAT_REGNUM, buf);
2032 nat_collection = extract_unsigned_integer (buf, 8, byte_order);
2033 }
2034 else
2035 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
2036 nat_bit = (gr_addr >> 3) & 0x3f;
2037 natval = (nat_collection >> nat_bit) & 1;
2038 }
2039
2040 return frame_unwind_got_constant (this_frame, regnum, natval);
2041 }
2042
2043 else if (regnum == IA64_IP_REGNUM)
2044 {
2045 CORE_ADDR pc = 0;
2046 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2047
2048 if (addr != 0)
2049 {
2050 read_memory (addr, buf, register_size (gdbarch, IA64_IP_REGNUM));
2051 pc = extract_unsigned_integer (buf, 8, byte_order);
2052 }
2053 else if (cache->frameless)
2054 {
2055 get_frame_register (this_frame, IA64_BR0_REGNUM, buf);
2056 pc = extract_unsigned_integer (buf, 8, byte_order);
2057 }
2058 pc &= ~0xf;
2059 return frame_unwind_got_constant (this_frame, regnum, pc);
2060 }
2061
2062 else if (regnum == IA64_PSR_REGNUM)
2063 {
2064 /* We don't know how to get the complete previous PSR, but we need it
2065 for the slot information when we unwind the pc (pc is formed of IP
2066 register plus slot information from PSR). To get the previous
2067 slot information, we take it from the low bits of the return address. */
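/* For example, if the saved return address ends in slot number 2, the
   code below clears bits 41-42 of the unwound psr and stores 2 there,
   so a pc later rebuilt from the ip word address plus this slot field
   points at the right instruction within its bundle.  */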
2068 ULONGEST slot_num = 0;
2069 CORE_ADDR pc = 0;
2070 CORE_ADDR psr = 0;
2071 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2072
2073 get_frame_register (this_frame, IA64_PSR_REGNUM, buf);
2074 psr = extract_unsigned_integer (buf, 8, byte_order);
2075
2076 if (addr != 0)
2077 {
2078 read_memory (addr, buf, register_size (gdbarch, IA64_IP_REGNUM));
2079 pc = extract_unsigned_integer (buf, 8, byte_order);
2080 }
2081 else if (cache->frameless)
2082 {
2083 get_frame_register (this_frame, IA64_BR0_REGNUM, buf);
2084 pc = extract_unsigned_integer (buf, 8, byte_order);
2085 }
2086 psr &= ~(3LL << 41);
2087 slot_num = pc & 0x3LL;
2088 psr |= (CORE_ADDR)slot_num << 41;
2089 return frame_unwind_got_constant (this_frame, regnum, psr);
2090 }
2091
2092 else if (regnum == IA64_BR0_REGNUM)
2093 {
2094 CORE_ADDR addr = cache->saved_regs[IA64_BR0_REGNUM];
2095
2096 if (addr != 0)
2097 return frame_unwind_got_memory (this_frame, regnum, addr);
2098
2099 return frame_unwind_got_constant (this_frame, regnum, 0);
2100 }
2101
2102 else if ((regnum >= IA64_GR32_REGNUM && regnum <= IA64_GR127_REGNUM)
2103 || (regnum >= V32_REGNUM && regnum <= V127_REGNUM))
2104 {
2105 CORE_ADDR addr = 0;
2106
2107 if (regnum >= V32_REGNUM)
2108 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2109 addr = cache->saved_regs[regnum];
2110 if (addr != 0)
2111 return frame_unwind_got_memory (this_frame, regnum, addr);
2112
2113 if (cache->frameless)
2114 {
2115 struct value *reg_val;
2116 CORE_ADDR prev_cfm, prev_bsp, prev_bof;
2117
2118 /* FIXME: brobecker/2008-05-01: Doesn't this seem redundant
2119 with the same code above? */
2120 if (regnum >= V32_REGNUM)
2121 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2122 reg_val = ia64_frame_prev_register (this_frame, this_cache,
2123 IA64_CFM_REGNUM);
2124 prev_cfm = extract_unsigned_integer
2125 (reg_val->contents_all ().data (), 8, byte_order);
2126 reg_val = ia64_frame_prev_register (this_frame, this_cache,
2127 IA64_BSP_REGNUM);
2128 prev_bsp = extract_unsigned_integer
2129 (reg_val->contents_all ().data (), 8, byte_order);
2130 prev_bof = rse_address_add (prev_bsp, -(prev_cfm & 0x7f));
2131
2132 addr = rse_address_add (prev_bof, (regnum - IA64_GR32_REGNUM));
2133 return frame_unwind_got_memory (this_frame, regnum, addr);
2134 }
2135
2136 return frame_unwind_got_constant (this_frame, regnum, 0);
2137 }
2138
2139 else /* All other registers. */
2140 {
2141 CORE_ADDR addr = 0;
2142
2143 if (IA64_FR32_REGNUM <= regnum && regnum <= IA64_FR127_REGNUM)
2144 {
2145 /* Fetch floating point register rename base from current
2146 frame marker for this frame. */
2147 int rrb_fr = (cache->cfm >> 25) & 0x7f;
2148
2149 /* Adjust the floating point register number to account for
2150 register rotation. */
2151 regnum = IA64_FR32_REGNUM
2152 + ((regnum - IA64_FR32_REGNUM) + rrb_fr) % 96;
2153 }
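/* E.g. (made-up rename base): with rrb_fr = 10, a request for f40 is
   redirected to IA64_FR32_REGNUM + ((40 - 32) + 10) % 96, i.e.
   physical f50, before the saved-register lookup below.  */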
2154
2155 /* If we have stored a memory address, access the register. */
2156 addr = cache->saved_regs[regnum];
2157 if (addr != 0)
2158 return frame_unwind_got_memory (this_frame, regnum, addr);
2159 /* Otherwise, punt and get the current value of the register. */
2160 else
2161 return frame_unwind_got_register (this_frame, regnum, regnum);
2162 }
2163 }
2164
2165 static const struct frame_unwind ia64_frame_unwind =
2166 {
2167 "ia64 prologue",
2168 NORMAL_FRAME,
2169 default_frame_unwind_stop_reason,
2170 &ia64_frame_this_id,
2171 &ia64_frame_prev_register,
2172 NULL,
2173 default_frame_sniffer
2174 };
2175
2176 /* Signal trampolines. */
2177
2178 static void
2179 ia64_sigtramp_frame_init_saved_regs (frame_info_ptr this_frame,
2180 struct ia64_frame_cache *cache)
2181 {
2182 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2183 ia64_gdbarch_tdep *tdep = gdbarch_tdep<ia64_gdbarch_tdep> (gdbarch);
2184
2185 if (tdep->sigcontext_register_address)
2186 {
2187 int regno;
2188
2189 cache->saved_regs[IA64_VRAP_REGNUM]
2190 = tdep->sigcontext_register_address (gdbarch, cache->base,
2191 IA64_IP_REGNUM);
2192 cache->saved_regs[IA64_CFM_REGNUM]
2193 = tdep->sigcontext_register_address (gdbarch, cache->base,
2194 IA64_CFM_REGNUM);
2195 cache->saved_regs[IA64_PSR_REGNUM]
2196 = tdep->sigcontext_register_address (gdbarch, cache->base,
2197 IA64_PSR_REGNUM);
2198 cache->saved_regs[IA64_BSP_REGNUM]
2199 = tdep->sigcontext_register_address (gdbarch, cache->base,
2200 IA64_BSP_REGNUM);
2201 cache->saved_regs[IA64_RNAT_REGNUM]
2202 = tdep->sigcontext_register_address (gdbarch, cache->base,
2203 IA64_RNAT_REGNUM);
2204 cache->saved_regs[IA64_CCV_REGNUM]
2205 = tdep->sigcontext_register_address (gdbarch, cache->base,
2206 IA64_CCV_REGNUM);
2207 cache->saved_regs[IA64_UNAT_REGNUM]
2208 = tdep->sigcontext_register_address (gdbarch, cache->base,
2209 IA64_UNAT_REGNUM);
2210 cache->saved_regs[IA64_FPSR_REGNUM]
2211 = tdep->sigcontext_register_address (gdbarch, cache->base,
2212 IA64_FPSR_REGNUM);
2213 cache->saved_regs[IA64_PFS_REGNUM]
2214 = tdep->sigcontext_register_address (gdbarch, cache->base,
2215 IA64_PFS_REGNUM);
2216 cache->saved_regs[IA64_LC_REGNUM]
2217 = tdep->sigcontext_register_address (gdbarch, cache->base,
2218 IA64_LC_REGNUM);
2219
2220 for (regno = IA64_GR1_REGNUM; regno <= IA64_GR31_REGNUM; regno++)
2221 cache->saved_regs[regno] =
2222 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2223 for (regno = IA64_BR0_REGNUM; regno <= IA64_BR7_REGNUM; regno++)
2224 cache->saved_regs[regno] =
2225 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2226 for (regno = IA64_FR2_REGNUM; regno <= IA64_FR31_REGNUM; regno++)
2227 cache->saved_regs[regno] =
2228 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2229 }
2230 }
2231
2232 static struct ia64_frame_cache *
2233 ia64_sigtramp_frame_cache (frame_info_ptr this_frame, void **this_cache)
2234 {
2235 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2236 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2237 struct ia64_frame_cache *cache;
2238 gdb_byte buf[8];
2239
2240 if (*this_cache)
2241 return (struct ia64_frame_cache *) *this_cache;
2242
2243 cache = ia64_alloc_frame_cache ();
2244
2245 get_frame_register (this_frame, sp_regnum, buf);
2246 /* Note that frame size is hard-coded below. We cannot calculate it
2247 via prologue examination. */
2248 cache->base = extract_unsigned_integer (buf, 8, byte_order) + 16;
2249
2250 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2251 cache->bsp = extract_unsigned_integer (buf, 8, byte_order);
2252
2253 get_frame_register (this_frame, IA64_CFM_REGNUM, buf);
2254 cache->cfm = extract_unsigned_integer (buf, 8, byte_order);
2255 cache->sof = cache->cfm & 0x7f;
2256
2257 ia64_sigtramp_frame_init_saved_regs (this_frame, cache);
2258
2259 *this_cache = cache;
2260 return cache;
2261 }
2262
2263 static void
2264 ia64_sigtramp_frame_this_id (frame_info_ptr this_frame,
2265 void **this_cache, struct frame_id *this_id)
2266 {
2267 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2268 struct ia64_frame_cache *cache =
2269 ia64_sigtramp_frame_cache (this_frame, this_cache);
2270
2271 (*this_id) = frame_id_build_special (cache->base,
2272 get_frame_pc (this_frame),
2273 cache->bsp);
2274 if (gdbarch_debug >= 1)
2275 gdb_printf (gdb_stdlog,
2276 "sigtramp frame id: code %s, stack %s, "
2277 "special %s, this_frame %s\n",
2278 paddress (gdbarch, this_id->code_addr),
2279 paddress (gdbarch, this_id->stack_addr),
2280 paddress (gdbarch, cache->bsp),
2281 host_address_to_string (this_frame.get ()));
2282 }
2283
2284 static struct value *
2285 ia64_sigtramp_frame_prev_register (frame_info_ptr this_frame,
2286 void **this_cache, int regnum)
2287 {
2288 struct ia64_frame_cache *cache =
2289 ia64_sigtramp_frame_cache (this_frame, this_cache);
2290
2291 gdb_assert (regnum >= 0);
2292
2293 if (!target_has_registers ())
2294 error (_("No registers."));
2295
2296 if (regnum == IA64_IP_REGNUM)
2297 {
2298 CORE_ADDR pc = 0;
2299 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2300
2301 if (addr != 0)
2302 {
2303 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2304 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2305 pc = read_memory_unsigned_integer (addr, 8, byte_order);
2306 }
2307 pc &= ~0xf;
2308 return frame_unwind_got_constant (this_frame, regnum, pc);
2309 }
2310
2311 else if ((regnum >= IA64_GR32_REGNUM && regnum <= IA64_GR127_REGNUM)
2312 || (regnum >= V32_REGNUM && regnum <= V127_REGNUM))
2313 {
2314 CORE_ADDR addr = 0;
2315
2316 if (regnum >= V32_REGNUM)
2317 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2318 addr = cache->saved_regs[regnum];
2319 if (addr != 0)
2320 return frame_unwind_got_memory (this_frame, regnum, addr);
2321
2322 return frame_unwind_got_constant (this_frame, regnum, 0);
2323 }
2324
2325 else /* All other registers not listed above. */
2326 {
2327 CORE_ADDR addr = cache->saved_regs[regnum];
2328
2329 if (addr != 0)
2330 return frame_unwind_got_memory (this_frame, regnum, addr);
2331
2332 return frame_unwind_got_constant (this_frame, regnum, 0);
2333 }
2334 }
2335
2336 static int
2337 ia64_sigtramp_frame_sniffer (const struct frame_unwind *self,
2338 frame_info_ptr this_frame,
2339 void **this_cache)
2340 {
2341 gdbarch *arch = get_frame_arch (this_frame);
2342 ia64_gdbarch_tdep *tdep = gdbarch_tdep<ia64_gdbarch_tdep> (arch);
2343 if (tdep->pc_in_sigtramp)
2344 {
2345 CORE_ADDR pc = get_frame_pc (this_frame);
2346
2347 if (tdep->pc_in_sigtramp (pc))
2348 return 1;
2349 }
2350
2351 return 0;
2352 }
2353
2354 static const struct frame_unwind ia64_sigtramp_frame_unwind =
2355 {
2356 "ia64 sigtramp",
2357 SIGTRAMP_FRAME,
2358 default_frame_unwind_stop_reason,
2359 ia64_sigtramp_frame_this_id,
2360 ia64_sigtramp_frame_prev_register,
2361 NULL,
2362 ia64_sigtramp_frame_sniffer
2363 };
2364
2365 \f
2366
2367 static CORE_ADDR
2368 ia64_frame_base_address (frame_info_ptr this_frame, void **this_cache)
2369 {
2370 struct ia64_frame_cache *cache = ia64_frame_cache (this_frame, this_cache);
2371
2372 return cache->base;
2373 }
2374
2375 static const struct frame_base ia64_frame_base =
2376 {
2377 &ia64_frame_unwind,
2378 ia64_frame_base_address,
2379 ia64_frame_base_address,
2380 ia64_frame_base_address
2381 };
2382
2383 #ifdef HAVE_LIBUNWIND_IA64_H
2384
2385 struct ia64_unwind_table_entry
2386 {
2387 unw_word_t start_offset;
2388 unw_word_t end_offset;
2389 unw_word_t info_offset;
2390 };
2391
2392 static __inline__ uint64_t
2393 ia64_rse_slot_num (uint64_t addr)
2394 {
2395 return (addr >> 3) & 0x3f;
2396 }
2397
2398 /* Skip over a designated number of registers in the backing
2399 store, remembering every 64th position is for NAT. */
2400 static __inline__ uint64_t
2401 ia64_rse_skip_regs (uint64_t addr, long num_regs)
2402 {
2403 long delta = ia64_rse_slot_num(addr) + num_regs;
2404
2405 if (num_regs < 0)
2406 delta -= 0x3e;
2407 return addr + ((num_regs + delta/0x3f) << 3);
2408 }
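/* For instance: if ADDR names slot 60 of its 64-slot group,
   ia64_rse_skip_regs (addr, 8) must also step over the NaT collection
   word in slot 63 and so returns addr + 9 * 8.  Likewise, starting
   from slot 2, skipping back 8 registers crosses the previous group's
   collection word and returns addr - 9 * 8.  */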
2409
2410 /* Gdb ia64-libunwind-tdep callback function to convert from an ia64 gdb
2411 register number to a libunwind register number. */
2412 static int
2413 ia64_gdb2uw_regnum (int regnum)
2414 {
2415 if (regnum == sp_regnum)
2416 return UNW_IA64_SP;
2417 else if (regnum == IA64_BSP_REGNUM)
2418 return UNW_IA64_BSP;
2419 else if ((unsigned) (regnum - IA64_GR0_REGNUM) < 128)
2420 return UNW_IA64_GR + (regnum - IA64_GR0_REGNUM);
2421 else if ((unsigned) (regnum - V32_REGNUM) < 95)
2422 return UNW_IA64_GR + 32 + (regnum - V32_REGNUM);
2423 else if ((unsigned) (regnum - IA64_FR0_REGNUM) < 128)
2424 return UNW_IA64_FR + (regnum - IA64_FR0_REGNUM);
2425 else if ((unsigned) (regnum - IA64_PR0_REGNUM) < 64)
2426 return -1;
2427 else if ((unsigned) (regnum - IA64_BR0_REGNUM) < 8)
2428 return UNW_IA64_BR + (regnum - IA64_BR0_REGNUM);
2429 else if (regnum == IA64_PR_REGNUM)
2430 return UNW_IA64_PR;
2431 else if (regnum == IA64_IP_REGNUM)
2432 return UNW_REG_IP;
2433 else if (regnum == IA64_CFM_REGNUM)
2434 return UNW_IA64_CFM;
2435 else if ((unsigned) (regnum - IA64_AR0_REGNUM) < 128)
2436 return UNW_IA64_AR + (regnum - IA64_AR0_REGNUM);
2437 else if ((unsigned) (regnum - IA64_NAT0_REGNUM) < 128)
2438 return UNW_IA64_NAT + (regnum - IA64_NAT0_REGNUM);
2439 else
2440 return -1;
2441 }
2442
2443 /* Gdb ia64-libunwind-tdep callback function to convert from a libunwind
2444 register number to an ia64 gdb register number. */
2445 static int
2446 ia64_uw2gdb_regnum (int uw_regnum)
2447 {
2448 if (uw_regnum == UNW_IA64_SP)
2449 return sp_regnum;
2450 else if (uw_regnum == UNW_IA64_BSP)
2451 return IA64_BSP_REGNUM;
2452 else if ((unsigned) (uw_regnum - UNW_IA64_GR) < 32)
2453 return IA64_GR0_REGNUM + (uw_regnum - UNW_IA64_GR);
2454 else if ((unsigned) (uw_regnum - UNW_IA64_GR) < 128)
2455 return V32_REGNUM + (uw_regnum - (IA64_GR0_REGNUM + 32));
2456 else if ((unsigned) (uw_regnum - UNW_IA64_FR) < 128)
2457 return IA64_FR0_REGNUM + (uw_regnum - UNW_IA64_FR);
2458 else if ((unsigned) (uw_regnum - UNW_IA64_BR) < 8)
2459 return IA64_BR0_REGNUM + (uw_regnum - UNW_IA64_BR);
2460 else if (uw_regnum == UNW_IA64_PR)
2461 return IA64_PR_REGNUM;
2462 else if (uw_regnum == UNW_REG_IP)
2463 return IA64_IP_REGNUM;
2464 else if (uw_regnum == UNW_IA64_CFM)
2465 return IA64_CFM_REGNUM;
2466 else if ((unsigned) (uw_regnum - UNW_IA64_AR) < 128)
2467 return IA64_AR0_REGNUM + (uw_regnum - UNW_IA64_AR);
2468 else if ((unsigned) (uw_regnum - UNW_IA64_NAT) < 128)
2469 return IA64_NAT0_REGNUM + (uw_regnum - UNW_IA64_NAT);
2470 else
2471 return -1;
2472 }
2473
2474 /* Gdb ia64-libunwind-tdep callback function to reveal whether a
2475 register is a float register. */
2476 static int
2477 ia64_is_fpreg (int uw_regnum)
2478 {
2479 return unw_is_fpreg (uw_regnum);
2480 }
2481
2482 /* Libunwind callback accessor function for general registers. */
2483 static int
2484 ia64_access_reg (unw_addr_space_t as, unw_regnum_t uw_regnum, unw_word_t *val,
2485 int write, void *arg)
2486 {
2487 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2488 unw_word_t bsp, sof, cfm, psr, ip;
2489 struct frame_info *this_frame = (frame_info *) arg;
2490 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2491 ia64_gdbarch_tdep *tdep = gdbarch_tdep<ia64_gdbarch_tdep> (gdbarch);
2492
2493 /* We never call any libunwind routines that need to write registers. */
2494 gdb_assert (!write);
2495
2496 switch (uw_regnum)
2497 {
2498 case UNW_REG_IP:
2499 /* Libunwind expects to see the pc value which means the slot number
2500 from the psr must be merged with the ip word address. */
2501 ip = get_frame_register_unsigned (this_frame, IA64_IP_REGNUM);
2502 psr = get_frame_register_unsigned (this_frame, IA64_PSR_REGNUM);
2503 *val = ip | ((psr >> 41) & 0x3);
2504 break;
2505
2506 case UNW_IA64_AR_BSP:
2507 /* Libunwind expects to see the beginning of the current
2508 register frame so we must account for the fact that
2509 ptrace() will return a value for bsp that points *after*
2510 the current register frame. */
2511 bsp = get_frame_register_unsigned (this_frame, IA64_BSP_REGNUM);
2512 cfm = get_frame_register_unsigned (this_frame, IA64_CFM_REGNUM);
2513 sof = tdep->size_of_register_frame (this_frame, cfm);
2514 *val = ia64_rse_skip_regs (bsp, -sof);
2515 break;
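/* E.g. for a frame of sof = 14 stacked registers, the value handed to
   libunwind is 14 register slots (plus any intervening NaT collection
   word) below the bsp that ptrace() reports.  */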
2516
2517 case UNW_IA64_AR_BSPSTORE:
2518 /* Libunwind wants bspstore to be after the current register frame.
2519 This is what ptrace() and gdb treat as the regular bsp value. */
2520 *val = get_frame_register_unsigned (this_frame, IA64_BSP_REGNUM);
2521 break;
2522
2523 default:
2524 /* For all other registers, just unwind the value directly. */
2525 *val = get_frame_register_unsigned (this_frame, regnum);
2526 break;
2527 }
2528
2529 if (gdbarch_debug >= 1)
2530 gdb_printf (gdb_stdlog,
2531 " access_reg: from cache: %4s=%s\n",
2532 (((unsigned) regnum <= IA64_NAT127_REGNUM)
2533 ? ia64_register_names[regnum] : "r??"),
2534 paddress (gdbarch, *val));
2535 return 0;
2536 }
2537
2538 /* Libunwind callback accessor function for floating-point registers. */
2539 static int
2540 ia64_access_fpreg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2541 unw_fpreg_t *val, int write, void *arg)
2542 {
2543 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2544 frame_info_ptr this_frame = (frame_info_ptr) arg;
2545
2546 /* We never call any libunwind routines that need to write registers. */
2547 gdb_assert (!write);
2548
2549 get_frame_register (this_frame, regnum, (gdb_byte *) val);
2550
2551 return 0;
2552 }
2553
2554 /* Libunwind callback accessor function for top-level rse registers. */
2555 static int
2556 ia64_access_rse_reg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2557 unw_word_t *val, int write, void *arg)
2558 {
2559 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2560 unw_word_t bsp, sof, cfm, psr, ip;
2561 struct regcache *regcache = (struct regcache *) arg;
2562 struct gdbarch *gdbarch = regcache->arch ();
2563
2564 /* We never call any libunwind routines that need to write registers. */
2565 gdb_assert (!write);
2566
2567 switch (uw_regnum)
2568 {
2569 case UNW_REG_IP:
2570 /* Libunwind expects to see the pc value which means the slot number
2571 from the psr must be merged with the ip word address. */
2572 regcache_cooked_read_unsigned (regcache, IA64_IP_REGNUM, &ip);
2573 regcache_cooked_read_unsigned (regcache, IA64_PSR_REGNUM, &psr);
2574 *val = ip | ((psr >> 41) & 0x3);
2575 break;
2576
2577 case UNW_IA64_AR_BSP:
2578 /* Libunwind expects to see the beginning of the current
2579 register frame so we must account for the fact that
2580 ptrace() will return a value for bsp that points *after*
2581 the current register frame. */
2582 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
2583 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
2584 sof = (cfm & 0x7f);
2585 *val = ia64_rse_skip_regs (bsp, -sof);
2586 break;
2587
2588 case UNW_IA64_AR_BSPSTORE:
2589 /* Libunwind wants bspstore to be after the current register frame.
2590 This is what ptrace() and gdb treat as the regular bsp value. */
2591 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, val);
2592 break;
2593
2594 default:
2595 /* For all other registers, just unwind the value directly. */
2596 regcache_cooked_read_unsigned (regcache, regnum, val);
2597 break;
2598 }
2599
2600 if (gdbarch_debug >= 1)
2601 gdb_printf (gdb_stdlog,
2602 " access_rse_reg: from cache: %4s=%s\n",
2603 (((unsigned) regnum <= IA64_NAT127_REGNUM)
2604 ? ia64_register_names[regnum] : "r??"),
2605 paddress (gdbarch, *val));
2606
2607 return 0;
2608 }
2609
2610 /* Libunwind callback accessor function for top-level fp registers. */
2611 static int
2612 ia64_access_rse_fpreg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2613 unw_fpreg_t *val, int write, void *arg)
2614 {
2615 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2616 struct regcache *regcache = (struct regcache *) arg;
2617
2618 /* We never call any libunwind routines that need to write registers. */
2619 gdb_assert (!write);
2620
2621 regcache->cooked_read (regnum, (gdb_byte *) val);
2622
2623 return 0;
2624 }
2625
2626 /* Libunwind callback accessor function for accessing memory. */
2627 static int
2628 ia64_access_mem (unw_addr_space_t as,
2629 unw_word_t addr, unw_word_t *val,
2630 int write, void *arg)
2631 {
2632 if (addr - KERNEL_START < ktab_size)
2633 {
2634 unw_word_t *laddr = (unw_word_t*) ((char *) ktab
2635 + (addr - KERNEL_START));
2636
2637 if (write)
2638 *laddr = *val;
2639 else
2640 *val = *laddr;
2641 return 0;
2642 }
2643
2644 /* XXX do we need to normalize byte-order here? */
2645 if (write)
2646 return target_write_memory (addr, (gdb_byte *) val, sizeof (unw_word_t));
2647 else
2648 return target_read_memory (addr, (gdb_byte *) val, sizeof (unw_word_t));
2649 }
2650
2651 /* Call low-level function to access the kernel unwind table. */
2652 static std::optional<gdb::byte_vector>
2653 getunwind_table ()
2654 {
2655 /* FIXME drow/2005-09-10: This code used to call
2656 ia64_linux_xfer_unwind_table directly to fetch the unwind table
2657 for the currently running ia64-linux kernel. That data should
2658 come from the core file and be accessed via the auxv vector; if
2659 we want to preserve fall back to the running kernel's table, then
2660 we should find a way to override the corefile layer's
2661 xfer_partial method. */
2662
2663 return target_read_alloc (current_inferior ()->top_target (),
2664 TARGET_OBJECT_UNWIND_TABLE, NULL);
2665 }
2666
2667 /* Get the kernel unwind table. */
2668 static int
2669 get_kernel_table (unw_word_t ip, unw_dyn_info_t *di)
2670 {
2671 static struct ia64_table_entry *etab;
2672
2673 if (!ktab)
2674 {
2675 ktab_buf = getunwind_table ();
2676 if (!ktab_buf)
2677 return -UNW_ENOINFO;
2678
2679 ktab = (struct ia64_table_entry *) ktab_buf->data ();
2680 ktab_size = ktab_buf->size ();
2681
2682 for (etab = ktab; etab->start_offset; ++etab)
2683 etab->info_offset += KERNEL_START;
2684 }
2685
2686 if (ip < ktab[0].start_offset || ip >= etab[-1].end_offset)
2687 return -UNW_ENOINFO;
2688
2689 di->format = UNW_INFO_FORMAT_TABLE;
2690 di->gp = 0;
2691 di->start_ip = ktab[0].start_offset;
2692 di->end_ip = etab[-1].end_offset;
2693 di->u.ti.name_ptr = (unw_word_t) "<kernel>";
2694 di->u.ti.segbase = 0;
2695 di->u.ti.table_len = ((char *) etab - (char *) ktab) / sizeof (unw_word_t);
2696 di->u.ti.table_data = (unw_word_t *) ktab;
2697
2698 if (gdbarch_debug >= 1)
2699 gdb_printf (gdb_stdlog, "get_kernel_table: found table `%s': "
2700 "segbase=%s, length=%s, gp=%s\n",
2701 (char *) di->u.ti.name_ptr,
2702 hex_string (di->u.ti.segbase),
2703 pulongest (di->u.ti.table_len),
2704 hex_string (di->gp));
2705 return 0;
2706 }
2707
2708 /* Find the unwind table entry for a specified address. */
2709 static int
2710 ia64_find_unwind_table (struct objfile *objfile, unw_word_t ip,
2711 unw_dyn_info_t *dip, void **buf)
2712 {
2713 Elf_Internal_Phdr *phdr, *p_text = NULL, *p_unwind = NULL;
2714 Elf_Internal_Ehdr *ehdr;
2715 unw_word_t segbase = 0;
2716 CORE_ADDR load_base;
2717 bfd *bfd;
2718 int i;
2719
2720 bfd = objfile->obfd;
2721
2722 ehdr = elf_tdata (bfd)->elf_header;
2723 phdr = elf_tdata (bfd)->phdr;
2724
2725 load_base = objfile->text_section_offset ();
2726
2727 for (i = 0; i < ehdr->e_phnum; ++i)
2728 {
2729 switch (phdr[i].p_type)
2730 {
2731 case PT_LOAD:
2732 if ((unw_word_t) (ip - load_base - phdr[i].p_vaddr)
2733 < phdr[i].p_memsz)
2734 p_text = phdr + i;
2735 break;
2736
2737 case PT_IA_64_UNWIND:
2738 p_unwind = phdr + i;
2739 break;
2740
2741 default:
2742 break;
2743 }
2744 }
2745
2746 if (!p_text || !p_unwind)
2747 return -UNW_ENOINFO;
2748
2749 /* Verify that the segment that contains the IP also contains
2750 the static unwind table. If not, we may be in the Linux kernel's
2751 DSO gate page, in which case the unwind table is in another segment.
2752 Otherwise, we are dealing with runtime-generated code, for which we
2753 have no info here. */
2754 segbase = p_text->p_vaddr + load_base;
2755
2756 if ((p_unwind->p_vaddr - p_text->p_vaddr) >= p_text->p_memsz)
2757 {
2758 int ok = 0;
2759 for (i = 0; i < ehdr->e_phnum; ++i)
2760 {
2761 if (phdr[i].p_type == PT_LOAD
2762 && (p_unwind->p_vaddr - phdr[i].p_vaddr) < phdr[i].p_memsz)
2763 {
2764 ok = 1;
2765 /* Get the segbase from the section containing the
2766 libunwind table. */
2767 segbase = phdr[i].p_vaddr + load_base;
2768 }
2769 }
2770 if (!ok)
2771 return -UNW_ENOINFO;
2772 }
2773
2774 dip->start_ip = p_text->p_vaddr + load_base;
2775 dip->end_ip = dip->start_ip + p_text->p_memsz;
2776 dip->gp = ia64_find_global_pointer (objfile->arch (), ip);
2777 dip->format = UNW_INFO_FORMAT_REMOTE_TABLE;
2778 dip->u.rti.name_ptr = (unw_word_t) bfd_get_filename (bfd);
2779 dip->u.rti.segbase = segbase;
2780 dip->u.rti.table_len = p_unwind->p_memsz / sizeof (unw_word_t);
2781 dip->u.rti.table_data = p_unwind->p_vaddr + load_base;
2782
2783 return 0;
2784 }
2785
2786 /* Libunwind callback accessor function to acquire procedure unwind-info. */
2787 static int
2788 ia64_find_proc_info_x (unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
2789 int need_unwind_info, void *arg)
2790 {
2791 struct obj_section *sec = find_pc_section (ip);
2792 unw_dyn_info_t di;
2793 int ret;
2794 void *buf = NULL;
2795
2796 if (!sec)
2797 {
2798 /* XXX This only works if the host and the target architecture are
2799 both ia64 and if they have (more or less) the same kernel
2800 version. */
2801 if (get_kernel_table (ip, &di) < 0)
2802 return -UNW_ENOINFO;
2803
2804 if (gdbarch_debug >= 1)
2805 gdb_printf (gdb_stdlog, "ia64_find_proc_info_x: %s -> "
2806 "(name=`%s',segbase=%s,start=%s,end=%s,gp=%s,"
2807 "length=%s,data=%s)\n",
2808 hex_string (ip), (char *)di.u.ti.name_ptr,
2809 hex_string (di.u.ti.segbase),
2810 hex_string (di.start_ip), hex_string (di.end_ip),
2811 hex_string (di.gp),
2812 pulongest (di.u.ti.table_len),
2813 hex_string ((CORE_ADDR)di.u.ti.table_data));
2814 }
2815 else
2816 {
2817 ret = ia64_find_unwind_table (sec->objfile, ip, &di, &buf);
2818 if (ret < 0)
2819 return ret;
2820
2821 if (gdbarch_debug >= 1)
2822 gdb_printf (gdb_stdlog, "ia64_find_proc_info_x: %s -> "
2823 "(name=`%s',segbase=%s,start=%s,end=%s,gp=%s,"
2824 "length=%s,data=%s)\n",
2825 hex_string (ip), (char *)di.u.rti.name_ptr,
2826 hex_string (di.u.rti.segbase),
2827 hex_string (di.start_ip), hex_string (di.end_ip),
2828 hex_string (di.gp),
2829 pulongest (di.u.rti.table_len),
2830 hex_string (di.u.rti.table_data));
2831 }
2832
2833 ret = libunwind_search_unwind_table (&as, ip, &di, pi, need_unwind_info,
2834 arg);
2835
2836 /* We no longer need the dyn info storage so free it. */
2837 xfree (buf);
2838
2839 return ret;
2840 }
2841
2842 /* Libunwind callback accessor function for cleanup. */
2843 static void
2844 ia64_put_unwind_info (unw_addr_space_t as,
2845 unw_proc_info_t *pip, void *arg)
2846 {
2847 /* Nothing required for now. */
2848 }
2849
2850 /* Libunwind callback accessor function to get head of the dynamic
2851 unwind-info registration list. */
2852 static int
2853 ia64_get_dyn_info_list (unw_addr_space_t as,
2854 unw_word_t *dilap, void *arg)
2855 {
2856 struct obj_section *text_sec;
2857 unw_word_t ip, addr;
2858 unw_dyn_info_t di;
2859 int ret;
2860
2861 if (!libunwind_is_initialized ())
2862 return -UNW_ENOINFO;
2863
2864 for (objfile *objfile : current_program_space->objfiles ())
2865 {
2866 void *buf = NULL;
2867
2868 text_sec = objfile->sections + SECT_OFF_TEXT (objfile);
2869 ip = text_sec->addr ();
2870 ret = ia64_find_unwind_table (objfile, ip, &di, &buf);
2871 if (ret >= 0)
2872 {
2873 addr = libunwind_find_dyn_list (as, &di, arg);
2874 /* We no longer need the dyn info storage so free it. */
2875 xfree (buf);
2876
2877 if (addr)
2878 {
2879 if (gdbarch_debug >= 1)
2880 gdb_printf (gdb_stdlog,
2881 "dynamic unwind table in objfile %s "
2882 "at %s (gp=%s)\n",
2883 bfd_get_filename (objfile->obfd),
2884 hex_string (addr), hex_string (di.gp));
2885 *dilap = addr;
2886 return 0;
2887 }
2888 }
2889 }
2890 return -UNW_ENOINFO;
2891 }
2892
2893
2894 /* Frame interface functions for libunwind. */
2895
2896 static void
2897 ia64_libunwind_frame_this_id (frame_info_ptr this_frame, void **this_cache,
2898 struct frame_id *this_id)
2899 {
2900 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2901 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2902 struct frame_id id = outer_frame_id;
2903 gdb_byte buf[8];
2904 CORE_ADDR bsp;
2905
2906 libunwind_frame_this_id (this_frame, this_cache, &id);
2907 if (id == outer_frame_id)
2908 {
2909 (*this_id) = outer_frame_id;
2910 return;
2911 }
2912
2913 /* We must add the bsp as the special address for frame comparison
2914 purposes. */
2915 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2916 bsp = extract_unsigned_integer (buf, 8, byte_order);
2917
2918 (*this_id) = frame_id_build_special (id.stack_addr, id.code_addr, bsp);
2919
2920 if (gdbarch_debug >= 1)
2921 gdb_printf (gdb_stdlog,
2922 "libunwind frame id: code %s, stack %s, "
2923 "special %s, this_frame %s\n",
2924 paddress (gdbarch, id.code_addr),
2925 paddress (gdbarch, id.stack_addr),
2926 paddress (gdbarch, bsp),
2927 host_address_to_string (this_frame));
2928 }
2929
2930 static struct value *
2931 ia64_libunwind_frame_prev_register (frame_info_ptr this_frame,
2932 void **this_cache, int regnum)
2933 {
2934 int reg = regnum;
2935 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2936 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2937 struct value *val;
2938
2939 if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
2940 reg = IA64_PR_REGNUM;
2941 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
2942 reg = IA64_UNAT_REGNUM;
2943
2944 /* Let libunwind do most of the work. */
2945 val = libunwind_frame_prev_register (this_frame, this_cache, reg);
2946
2947 if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
2948 {
2949 ULONGEST prN_val;
2950
2951 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
2952 {
2953 int rrb_pr = 0;
2954 ULONGEST cfm;
2955
2956 /* Fetch predicate register rename base from current frame
2957 marker for this frame. */
2958 cfm = get_frame_register_unsigned (this_frame, IA64_CFM_REGNUM);
2959 rrb_pr = (cfm >> 32) & 0x3f;
2960
2961 /* Adjust the register number to account for register rotation. */
2962 regnum = VP16_REGNUM + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
2963 }
2964 prN_val = extract_bit_field (val->contents_all ().data (),
2965 regnum - VP0_REGNUM, 1);
2966 return frame_unwind_got_constant (this_frame, regnum, prN_val);
2967 }
2968
2969 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
2970 {
2971 ULONGEST unatN_val;
2972
2973 unatN_val = extract_bit_field (val->contents_all ().data (),
2974 regnum - IA64_NAT0_REGNUM, 1);
2975 return frame_unwind_got_constant (this_frame, regnum, unatN_val);
2976 }
2977
2978 else if (regnum == IA64_BSP_REGNUM)
2979 {
2980 struct value *cfm_val;
2981 CORE_ADDR prev_bsp, prev_cfm;
2982
2983 /* We want to calculate the previous bsp as the end of the previous
2984 register stack frame. This corresponds to what the hardware bsp
2985 register will be if we pop the frame back which is why we might
2986 have been called. We know that libunwind will pass us back the
2987 beginning of the current frame so we should just add sof to it. */
2988 prev_bsp = extract_unsigned_integer (val->contents_all ().data (),
2989 8, byte_order);
2990 cfm_val = libunwind_frame_prev_register (this_frame, this_cache,
2991 IA64_CFM_REGNUM);
2992 prev_cfm = extract_unsigned_integer (cfm_val->contents_all ().data (),
2993 8, byte_order);
2994 prev_bsp = rse_address_add (prev_bsp, (prev_cfm & 0x7f));
2995
2996 return frame_unwind_got_constant (this_frame, regnum, prev_bsp);
2997 }
2998 else
2999 return val;
3000 }
3001
3002 static int
3003 ia64_libunwind_frame_sniffer (const struct frame_unwind *self,
3004 frame_info_ptr this_frame,
3005 void **this_cache)
3006 {
3007 if (libunwind_is_initialized ()
3008 && libunwind_frame_sniffer (self, this_frame, this_cache))
3009 return 1;
3010
3011 return 0;
3012 }
3013
3014 static const struct frame_unwind ia64_libunwind_frame_unwind =
3015 {
3016 "ia64 libunwind",
3017 NORMAL_FRAME,
3018 default_frame_unwind_stop_reason,
3019 ia64_libunwind_frame_this_id,
3020 ia64_libunwind_frame_prev_register,
3021 NULL,
3022 ia64_libunwind_frame_sniffer,
3023 libunwind_frame_dealloc_cache
3024 };
3025
3026 static void
3027 ia64_libunwind_sigtramp_frame_this_id (frame_info_ptr this_frame,
3028 void **this_cache,
3029 struct frame_id *this_id)
3030 {
3031 struct gdbarch *gdbarch = get_frame_arch (this_frame);
3032 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3033 gdb_byte buf[8];
3034 CORE_ADDR bsp;
3035 struct frame_id id = outer_frame_id;
3036
3037 libunwind_frame_this_id (this_frame, this_cache, &id);
3038 if (id == outer_frame_id)
3039 {
3040 (*this_id) = outer_frame_id;
3041 return;
3042 }
3043
3044 /* We must add the bsp as the special address for frame comparison
3045 purposes. */
3046 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
3047 bsp = extract_unsigned_integer (buf, 8, byte_order);
3048
3049 /* For a sigtramp frame, we don't make the check for previous ip being 0. */
3050 (*this_id) = frame_id_build_special (id.stack_addr, id.code_addr, bsp);
3051
3052 if (gdbarch_debug >= 1)
3053 gdb_printf (gdb_stdlog,
3054 "libunwind sigtramp frame id: code %s, "
3055 "stack %s, special %s, this_frame %s\n",
3056 paddress (gdbarch, id.code_addr),
3057 paddress (gdbarch, id.stack_addr),
3058 paddress (gdbarch, bsp),
3059 host_address_to_string (this_frame));
3060 }
3061
3062 static struct value *
3063 ia64_libunwind_sigtramp_frame_prev_register (frame_info_ptr this_frame,
3064 void **this_cache, int regnum)
3065 {
3066 struct gdbarch *gdbarch = get_frame_arch (this_frame);
3067 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3068 struct value *prev_ip_val;
3069 CORE_ADDR prev_ip;
3070
3071 /* If the previous frame pc value is 0, then we want to use the SIGCONTEXT
3072 method of getting previous registers. */
3073 prev_ip_val = libunwind_frame_prev_register (this_frame, this_cache,
3074 IA64_IP_REGNUM);
3075 prev_ip = extract_unsigned_integer (prev_ip_val->contents_all ().data (),
3076 8, byte_order);
3077
3078 if (prev_ip == 0)
3079 {
3080 void *tmp_cache = NULL;
3081 return ia64_sigtramp_frame_prev_register (this_frame, &tmp_cache,
3082 regnum);
3083 }
3084 else
3085 return ia64_libunwind_frame_prev_register (this_frame, this_cache, regnum);
3086 }
3087
3088 static int
3089 ia64_libunwind_sigtramp_frame_sniffer (const struct frame_unwind *self,
3090 frame_info_ptr this_frame,
3091 void **this_cache)
3092 {
3093 if (libunwind_is_initialized ())
3094 {
3095 if (libunwind_sigtramp_frame_sniffer (self, this_frame, this_cache))
3096 return 1;
3097 return 0;
3098 }
3099 else
3100 return ia64_sigtramp_frame_sniffer (self, this_frame, this_cache);
3101 }
3102
3103 static const struct frame_unwind ia64_libunwind_sigtramp_frame_unwind =
3104 {
3105 "ia64 libunwind sigtramp",
3106 SIGTRAMP_FRAME,
3107 default_frame_unwind_stop_reason,
3108 ia64_libunwind_sigtramp_frame_this_id,
3109 ia64_libunwind_sigtramp_frame_prev_register,
3110 NULL,
3111 ia64_libunwind_sigtramp_frame_sniffer
3112 };
3113
3114 /* Set of libunwind callback accessor functions. */
3115 unw_accessors_t ia64_unw_accessors =
3116 {
3117 ia64_find_proc_info_x,
3118 ia64_put_unwind_info,
3119 ia64_get_dyn_info_list,
3120 ia64_access_mem,
3121 ia64_access_reg,
3122 ia64_access_fpreg,
3123 /* resume */
3124 /* get_proc_name */
3125 };
3126
3127 /* Set of special libunwind callback accessor functions specific to accessing
3128 the rse registers. At the top of the stack, we want libunwind to figure out
3129 how to read r32 - r127. Though usually they are found sequentially in
3130 memory starting from $bof, this is not always true. */
3131 unw_accessors_t ia64_unw_rse_accessors =
3132 {
3133 ia64_find_proc_info_x,
3134 ia64_put_unwind_info,
3135 ia64_get_dyn_info_list,
3136 ia64_access_mem,
3137 ia64_access_rse_reg,
3138 ia64_access_rse_fpreg,
3139 /* resume */
3140 /* get_proc_name */
3141 };
3142
3143 /* Set of ia64-libunwind-tdep gdb callbacks and data for generic
3144 ia64-libunwind-tdep code to use. */
3145 struct libunwind_descr ia64_libunwind_descr =
3146 {
3147 ia64_gdb2uw_regnum,
3148 ia64_uw2gdb_regnum,
3149 ia64_is_fpreg,
3150 &ia64_unw_accessors,
3151 &ia64_unw_rse_accessors,
3152 };
3153
3154 #endif /* HAVE_LIBUNWIND_IA64_H */
3155
3156 static int
3157 ia64_use_struct_convention (struct type *type)
3158 {
3159 struct type *float_elt_type;
3160
3161 /* Don't use the struct convention for anything but structure,
3162 union, or array types. */
3163 if (!(type->code () == TYPE_CODE_STRUCT
3164 || type->code () == TYPE_CODE_UNION
3165 || type->code () == TYPE_CODE_ARRAY))
3166 return 0;
3167
3168 /* HFAs are structures (or arrays) consisting entirely of floating
3169 point values of the same length. Up to 8 of these are returned
3170 in registers. Don't use the struct convention when this is the
3171 case. */
3172 float_elt_type = is_float_or_hfa_type (type);
3173 if (float_elt_type != NULL
3174 && type->length () / float_elt_type->length () <= 8)
3175 return 0;
3176
3177 /* Other structs of length 32 or less are returned in r8-r11.
3178 Don't use the struct convention for those either. */
3179 return type->length () > 32;
3180 }
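/* A few illustrative cases for the convention above: a struct of three
   floats is an HFA of 3 <= 8 elements and is returned in f8-f10; a
   32-byte struct of four 8-byte integers is returned in r8-r11; a
   40-byte array of five 8-byte integers exceeds both limits and uses
   the struct convention.  */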
3181
3182 /* Return non-zero if TYPE is a structure or union type. */
3183
3184 static int
3185 ia64_struct_type_p (const struct type *type)
3186 {
3187 return (type->code () == TYPE_CODE_STRUCT
3188 || type->code () == TYPE_CODE_UNION);
3189 }
3190
3191 static void
3192 ia64_extract_return_value (struct type *type, struct regcache *regcache,
3193 gdb_byte *valbuf)
3194 {
3195 struct gdbarch *gdbarch = regcache->arch ();
3196 struct type *float_elt_type;
3197
3198 float_elt_type = is_float_or_hfa_type (type);
3199 if (float_elt_type != NULL)
3200 {
3201 gdb_byte from[IA64_FP_REGISTER_SIZE];
3202 int offset = 0;
3203 int regnum = IA64_FR8_REGNUM;
3204 int n = type->length () / float_elt_type->length ();
3205
3206 while (n-- > 0)
3207 {
3208 regcache->cooked_read (regnum, from);
3209 target_float_convert (from, ia64_ext_type (gdbarch),
3210 valbuf + offset, float_elt_type);
3211 offset += float_elt_type->length ();
3212 regnum++;
3213 }
3214 }
3215 else if (!ia64_struct_type_p (type) && type->length () < 8)
3216 {
3217 /* This is an integral value, and its size is less than 8 bytes.
3218 These values are LSB-aligned, so extract the relevant bytes,
3219 and copy them into VALBUF. */
3220 /* brobecker/2005-12-30: Actually, all integral values are LSB aligned,
3221 so I suppose we should also add handling here for integral values
3222 whose size is greater than 8. But I wasn't able to create such
3223 a type, in either C or Ada, so I'm not worrying about these yet. */
3224 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3225 ULONGEST val;
3226
3227 regcache_cooked_read_unsigned (regcache, IA64_GR8_REGNUM, &val);
3228 store_unsigned_integer (valbuf, type->length (), byte_order, val);
3229 }
3230 else
3231 {
3232 ULONGEST val;
3233 int offset = 0;
3234 int regnum = IA64_GR8_REGNUM;
3235 int reglen = register_type (gdbarch, IA64_GR8_REGNUM)->length ();
3236 int n = type->length () / reglen;
3237 int m = type->length () % reglen;
3238
3239 while (n-- > 0)
3240 {
3241 ULONGEST regval;
3242 regcache_cooked_read_unsigned (regcache, regnum, &regval);
3243 memcpy ((char *)valbuf + offset, &regval, reglen);
3244 offset += reglen;
3245 regnum++;
3246 }
3247
3248 if (m)
3249 {
3250 regcache_cooked_read_unsigned (regcache, regnum, &val);
3251 memcpy ((char *)valbuf + offset, &val, m);
3252 }
3253 }
3254 }
3255
3256 static void
3257 ia64_store_return_value (struct type *type, struct regcache *regcache,
3258 const gdb_byte *valbuf)
3259 {
3260 struct gdbarch *gdbarch = regcache->arch ();
3261 struct type *float_elt_type;
3262
3263 float_elt_type = is_float_or_hfa_type (type);
3264 if (float_elt_type != NULL)
3265 {
3266 gdb_byte to[IA64_FP_REGISTER_SIZE];
3267 int offset = 0;
3268 int regnum = IA64_FR8_REGNUM;
3269 int n = type->length () / float_elt_type->length ();
3270
3271 while (n-- > 0)
3272 {
3273 target_float_convert (valbuf + offset, float_elt_type,
3274 to, ia64_ext_type (gdbarch));
3275 regcache->cooked_write (regnum, to);
3276 offset += float_elt_type->length ();
3277 regnum++;
3278 }
3279 }
3280 else
3281 {
3282 int offset = 0;
3283 int regnum = IA64_GR8_REGNUM;
3284 int reglen = register_type (gdbarch, IA64_GR8_REGNUM)->length ();
3285 int n = type->length () / reglen;
3286 int m = type->length () % reglen;
3287
3288 while (n-- > 0)
3289 {
3290 ULONGEST val;
3291 memcpy (&val, (char *)valbuf + offset, reglen);
3292 regcache_cooked_write_unsigned (regcache, regnum, val);
3293 offset += reglen;
3294 regnum++;
3295 }
3296
3297 if (m)
3298 {
3299 ULONGEST val;
3300 memcpy (&val, (char *)valbuf + offset, m);
3301 regcache_cooked_write_unsigned (regcache, regnum, val);
3302 }
3303 }
3304 }
3305
3306 static enum return_value_convention
3307 ia64_return_value (struct gdbarch *gdbarch, struct value *function,
3308 struct type *valtype, struct regcache *regcache,
3309 gdb_byte *readbuf, const gdb_byte *writebuf)
3310 {
3311 int struct_return = ia64_use_struct_convention (valtype);
3312
3313 if (writebuf != NULL)
3314 {
3315 gdb_assert (!struct_return);
3316 ia64_store_return_value (valtype, regcache, writebuf);
3317 }
3318
3319 if (readbuf != NULL)
3320 {
3321 gdb_assert (!struct_return);
3322 ia64_extract_return_value (valtype, regcache, readbuf);
3323 }
3324
3325 if (struct_return)
3326 return RETURN_VALUE_STRUCT_CONVENTION;
3327 else
3328 return RETURN_VALUE_REGISTER_CONVENTION;
3329 }
3330
3331 static int
3332 is_float_or_hfa_type_recurse (struct type *t, struct type **etp)
3333 {
3334 switch (t->code ())
3335 {
3336 case TYPE_CODE_FLT:
3337 if (*etp)
3338 return (*etp)->length () == t->length ();
3339 else
3340 {
3341 *etp = t;
3342 return 1;
3343 }
3344 break;
3345 case TYPE_CODE_ARRAY:
3346 return
3347 is_float_or_hfa_type_recurse (check_typedef (t->target_type ()),
3348 etp);
3349 break;
3350 case TYPE_CODE_STRUCT:
3351 {
3352 int i;
3353
3354 for (i = 0; i < t->num_fields (); i++)
3355 if (!is_float_or_hfa_type_recurse
3356 (check_typedef (t->field (i).type ()), etp))
3357 return 0;
3358 return 1;
3359 }
3360 break;
3361 default:
3362 break;
3363 }
3364
3365 return 0;
3366 }
3367
3368 /* Determine if the given type is one of the floating point types or
3369 an HFA (which is a struct, array, or combination thereof whose
3370 bottom-most elements are all of the same floating point type). */
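/* For example, "struct { double d; double e[3]; }" is an HFA with
   element type double, whereas "struct { float f; double d; }" is not:
   the recursion above rejects leaves of differing floating point
   lengths.  */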
3371
3372 static struct type *
3373 is_float_or_hfa_type (struct type *t)
3374 {
3375 struct type *et = 0;
3376
3377 return is_float_or_hfa_type_recurse (t, &et) ? et : 0;
3378 }
3379
3380
3381 /* Return 1 if the alignment of T is such that the next even slot
3382 should be used. Return 0 if the next available slot should
3383 be used. (See section 8.5.1 of the IA-64 Software Conventions
3384 and Runtime manual). */
3385
3386 static int
3387 slot_alignment_is_next_even (struct type *t)
3388 {
3389 switch (t->code ())
3390 {
3391 case TYPE_CODE_INT:
3392 case TYPE_CODE_FLT:
3393 if (t->length () > 8)
3394 return 1;
3395 else
3396 return 0;
3397 case TYPE_CODE_ARRAY:
3398 return
3399 slot_alignment_is_next_even (check_typedef (t->target_type ()));
3400 case TYPE_CODE_STRUCT:
3401 {
3402 int i;
3403
3404 for (i = 0; i < t->num_fields (); i++)
3405 if (slot_alignment_is_next_even
3406 (check_typedef (t->field (i).type ())))
3407 return 1;
3408 return 0;
3409 }
3410 default:
3411 return 0;
3412 }
3413 }
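
/* A sketch of how the predicate above is used when laying out argument
   slots; it mirrors the check in ia64_push_dummy_call below.  The helper
   name is hypothetical.  */

static int
ia64_sketch_first_slot (int next_free_slot, int wants_even)
{
  /* If the next free slot is odd and the type's alignment asks for an
     even slot, one slot is skipped.  */
  if ((next_free_slot & 1) && wants_even)
    return next_free_slot + 1;
  return next_free_slot;
}

/* Example: after one 'int' argument in slot 0, a 16-byte 'long double'
   (length > 8, so the predicate returns 1) starts in slot 2, leaving
   slot 1 unused: ia64_sketch_first_slot (1, 1) == 2.  */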
3414
3415 /* Attempt to find (and return) the global pointer for the given
3416 function.
3417
3418 This is a rather nasty bit of code that searches for the .dynamic section
3419 in the objfile corresponding to the pc of the function we're trying
3420 to call. Once it finds the addresses at which the .dynamic section
3421 lives in the child process, it scans the Elf64_Dyn entries for a
3422 DT_PLTGOT tag. If it finds one of these, the corresponding
3423 d_un.d_ptr value is the global pointer. */
3424
3425 static CORE_ADDR
3426 ia64_find_global_pointer_from_dynamic_section (struct gdbarch *gdbarch,
3427 CORE_ADDR faddr)
3428 {
3429 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3430 struct obj_section *faddr_sect;
3431
3432 faddr_sect = find_pc_section (faddr);
3433 if (faddr_sect != NULL)
3434 {
3435 for (obj_section *osect : faddr_sect->objfile->sections ())
3436 {
3437 if (strcmp (osect->the_bfd_section->name, ".dynamic") == 0)
3438 {
3439 CORE_ADDR addr = osect->addr ();
3440 CORE_ADDR endaddr = osect->endaddr ();
3441
3442 while (addr < endaddr)
3443 {
3444 int status;
3445 LONGEST tag;
3446 gdb_byte buf[8];
3447
3448 status = target_read_memory (addr, buf, sizeof (buf));
3449 if (status != 0)
3450 break;
3451 tag = extract_signed_integer (buf, byte_order);
3452
3453 if (tag == DT_PLTGOT)
3454 {
3455 CORE_ADDR global_pointer;
3456
3457 status = target_read_memory (addr + 8, buf,
3458 sizeof (buf));
3459 if (status != 0)
3460 break;
3461 global_pointer
3462 = extract_unsigned_integer (buf, sizeof (buf),
3463 byte_order);
3464
3465 /* The payoff... */
3466 return global_pointer;
3467 }
3468
3469 if (tag == DT_NULL)
3470 break;
3471
3472 addr += 16;
3473 }
3474
3475 break;
3476 }
3477 }
3478 }
3479 return 0;
3480 }
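
/* For illustration, the same scan performed over a raw little-endian byte
   buffer instead of target memory.  Each Elf64_Dyn entry is 16 bytes: an
   8-byte tag followed by the 8-byte d_un value; DT_PLTGOT is tag 3 and
   DT_NULL (tag 0) terminates the table.  The function below is a
   hypothetical sketch, not something GDB uses.  */

#include <cstdint>
#include <cstddef>

static uint64_t
ia64_sketch_find_pltgot (const unsigned char *dyn, size_t len)
{
  for (size_t off = 0; off + 16 <= len; off += 16)
    {
      uint64_t tag = 0, val = 0;

      /* Decode the two little-endian doublewords by hand.  */
      for (int i = 7; i >= 0; i--)
        {
          tag = (tag << 8) | dyn[off + i];
          val = (val << 8) | dyn[off + 8 + i];
        }

      if (tag == 3)             /* DT_PLTGOT */
        return val;             /* d_un.d_ptr is the global pointer.  */
      if (tag == 0)             /* DT_NULL */
        break;
    }
  return 0;
}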
3481
3482 /* Attempt to find (and return) the global pointer for the given
3483 function. We first try the find_global_pointer_from_solib routine
3484 from the gdbarch tdep vector, if provided. And if that does not
3485 work, then we try ia64_find_global_pointer_from_dynamic_section. */
3486
3487 static CORE_ADDR
3488 ia64_find_global_pointer (struct gdbarch *gdbarch, CORE_ADDR faddr)
3489 {
3490 ia64_gdbarch_tdep *tdep = gdbarch_tdep<ia64_gdbarch_tdep> (gdbarch);
3491 CORE_ADDR addr = 0;
3492
3493 if (tdep->find_global_pointer_from_solib)
3494 addr = tdep->find_global_pointer_from_solib (gdbarch, faddr);
3495 if (addr == 0)
3496 addr = ia64_find_global_pointer_from_dynamic_section (gdbarch, faddr);
3497 return addr;
3498 }
3499
3500 /* Given a function's address, attempt to find (and return) the
3501 corresponding (canonical) function descriptor. Return 0 if
3502 not found. */
3503 static CORE_ADDR
3504 find_extant_func_descr (struct gdbarch *gdbarch, CORE_ADDR faddr)
3505 {
3506 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3507 struct obj_section *faddr_sect;
3508
3509 /* Return early if faddr is already a function descriptor. */
3510 faddr_sect = find_pc_section (faddr);
3511 if (faddr_sect && strcmp (faddr_sect->the_bfd_section->name, ".opd") == 0)
3512 return faddr;
3513
3514 if (faddr_sect != NULL)
3515 {
3516 for (obj_section *osect : faddr_sect->objfile->sections ())
3517 {
3518 if (strcmp (osect->the_bfd_section->name, ".opd") == 0)
3519 {
3520 CORE_ADDR addr = osect->addr ();
3521 CORE_ADDR endaddr = osect->endaddr ();
3522
3523 while (addr < endaddr)
3524 {
3525 int status;
3526 LONGEST faddr2;
3527 gdb_byte buf[8];
3528
3529 status = target_read_memory (addr, buf, sizeof (buf));
3530 if (status != 0)
3531 break;
3532 faddr2 = extract_signed_integer (buf, byte_order);
3533
3534 if (faddr == faddr2)
3535 return addr;
3536
3537 addr += 16;
3538 }
3539
3540 break;
3541 }
3542 }
3543 }
3544 return 0;
3545 }
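
/* An ia64 function descriptor, as stored in .opd, is simply two 8-byte
   words: the function's entry point followed by the global pointer that
   must be in gr1 when the function runs.  A sketch of that layout
   (hypothetical type name, not a BFD or GDB type):  */

#include <cstdint>

struct ia64_sketch_func_descr
{
  uint64_t entry_point;  /* Address of the function's first bundle.  */
  uint64_t gp;           /* Global pointer for the owning load module.  */
};

/* The loop above walks .opd in 16-byte steps and returns the address of
   the descriptor whose entry_point word equals FADDR.  */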
3546
3547 /* Attempt to find a function descriptor corresponding to the
3548 given address. If none is found, construct one on the
3549 stack using the address at fdaptr. */
3550
3551 static CORE_ADDR
3552 find_func_descr (struct regcache *regcache, CORE_ADDR faddr, CORE_ADDR *fdaptr)
3553 {
3554 struct gdbarch *gdbarch = regcache->arch ();
3555 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3556 CORE_ADDR fdesc;
3557
3558 fdesc = find_extant_func_descr (gdbarch, faddr);
3559
3560 if (fdesc == 0)
3561 {
3562 ULONGEST global_pointer;
3563 gdb_byte buf[16];
3564
3565 fdesc = *fdaptr;
3566 *fdaptr += 16;
3567
3568 global_pointer = ia64_find_global_pointer (gdbarch, faddr);
3569
3570 if (global_pointer == 0)
3571 regcache_cooked_read_unsigned (regcache,
3572 IA64_GR1_REGNUM, &global_pointer);
3573
3574 store_unsigned_integer (buf, 8, byte_order, faddr);
3575 store_unsigned_integer (buf + 8, 8, byte_order, global_pointer);
3576
3577 write_memory (fdesc, buf, 16);
3578 }
3579
3580 return fdesc;
3581 }
3582
3583 /* Use the following routine when printing out function pointers
3584 so the user can see the function address rather than just the
3585 function descriptor. */
3586 static CORE_ADDR
3587 ia64_convert_from_func_ptr_addr (struct gdbarch *gdbarch, CORE_ADDR addr,
3588 struct target_ops *targ)
3589 {
3590 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3591 struct obj_section *s;
3592 gdb_byte buf[8];
3593
3594 s = find_pc_section (addr);
3595
3596 /* Check if ADDR points to a function descriptor. */
3597 if (s && strcmp (s->the_bfd_section->name, ".opd") == 0)
3598 return read_memory_unsigned_integer (addr, 8, byte_order);
3599
3600 /* Normally, functions live inside a section that is executable.
3601 So, if ADDR points to a non-executable section, then treat it
3602 as a function descriptor and return the target address iff
3603 the target address itself points to a section that is executable.
3604 First check that the whole 8 bytes at ADDR are readable. */
3605 if (s && (s->the_bfd_section->flags & SEC_CODE) == 0
3606 && target_read_memory (addr, buf, 8) == 0)
3607 {
3608 CORE_ADDR pc = extract_unsigned_integer (buf, 8, byte_order);
3609 struct obj_section *pc_section = find_pc_section (pc);
3610
3611 if (pc_section && (pc_section->the_bfd_section->flags & SEC_CODE))
3612 return pc;
3613 }
3614
3615 /* There are also descriptors embedded in vtables. */
3616 if (s)
3617 {
3618 struct bound_minimal_symbol minsym;
3619
3620 minsym = lookup_minimal_symbol_by_pc (addr);
3621
3622 if (minsym.minsym
3623 && is_vtable_name (minsym.minsym->linkage_name ()))
3624 return read_memory_unsigned_integer (addr, 8, byte_order);
3625 }
3626
3627 return addr;
3628 }
3629
3630 static CORE_ADDR
3631 ia64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
3632 {
3633 return sp & ~0xfLL;
3634 }
3635
3636 /* The default "allocate_new_rse_frame" ia64_infcall_ops routine for ia64. */
3637
3638 static void
3639 ia64_allocate_new_rse_frame (struct regcache *regcache, ULONGEST bsp, int sof)
3640 {
3641 ULONGEST cfm, pfs, new_bsp;
3642
3643 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
3644
3645 new_bsp = rse_address_add (bsp, sof);
3646 regcache_cooked_write_unsigned (regcache, IA64_BSP_REGNUM, new_bsp);
3647
3648 regcache_cooked_read_unsigned (regcache, IA64_PFS_REGNUM, &pfs);
3649 pfs &= 0xc000000000000000LL;
3650 pfs |= (cfm & 0xffffffffffffLL);
3651 regcache_cooked_write_unsigned (regcache, IA64_PFS_REGNUM, pfs);
3652
3653 cfm &= 0xc000000000000000LL;
3654 cfm |= sof;
3655 regcache_cooked_write_unsigned (regcache, IA64_CFM_REGNUM, cfm);
3656 }
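
/* A sketch of the CFM fields that the masks above work with (bit
   positions per the IA-64 architecture; helper names are hypothetical).
   "sof" is the value OR'd into the new CFM above, and the
   0xffffffffffff mask is what copies the caller's frame marker into
   PFS.  */

#include <cstdint>

static inline unsigned ia64_sketch_cfm_sof (uint64_t cfm) { return cfm & 0x7f; }          /* Size of frame.  */
static inline unsigned ia64_sketch_cfm_sol (uint64_t cfm) { return (cfm >> 7) & 0x7f; }   /* Size of locals.  */
static inline unsigned ia64_sketch_cfm_sor (uint64_t cfm) { return (cfm >> 14) & 0xf; }   /* Size of rotating region.  */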
3657
3658 /* The default "store_argument_in_slot" ia64_infcall_ops routine for
3659 ia64. */
3660
3661 static void
3662 ia64_store_argument_in_slot (struct regcache *regcache, CORE_ADDR bsp,
3663 int slotnum, gdb_byte *buf)
3664 {
3665 write_memory (rse_address_add (bsp, slotnum), buf, 8);
3666 }
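
/* The slot's address in the RSE backing store comes from rse_address_add
   (defined earlier in this file).  Ignoring the NaT-collection
   doublewords that the real routine must skip, the computation reduces
   to 8 bytes per slot above BSP; a simplified sketch under that
   assumption (hypothetical name):  */

#include <cstdint>

static uint64_t
ia64_sketch_slot_addr_no_nat (uint64_t bsp, int slotnum)
{
  /* One 8-byte doubleword per slot.  The real computation also skips a
     NaT-collection doubleword wherever bits 3..8 of the address are all
     ones.  */
  return bsp + 8 * (uint64_t) slotnum;
}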
3667
3668 /* The default "set_function_addr" ia64_infcall_ops routine for ia64. */
3669
3670 static void
3671 ia64_set_function_addr (struct regcache *regcache, CORE_ADDR func_addr)
3672 {
3673 /* Nothing needed. */
3674 }
3675
3676 static CORE_ADDR
3677 ia64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
3678 struct regcache *regcache, CORE_ADDR bp_addr,
3679 int nargs, struct value **args, CORE_ADDR sp,
3680 function_call_return_method return_method,
3681 CORE_ADDR struct_addr)
3682 {
3683 ia64_gdbarch_tdep *tdep = gdbarch_tdep<ia64_gdbarch_tdep> (gdbarch);
3684 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3685 int argno;
3686 struct value *arg;
3687 struct type *type;
3688 int len, argoffset;
3689 int nslots, rseslots, memslots, slotnum, nfuncargs;
3690 int floatreg;
3691 ULONGEST bsp;
3692 CORE_ADDR funcdescaddr, global_pointer;
3693 CORE_ADDR func_addr = find_function_addr (function, NULL);
3694
3695 nslots = 0;
3696 nfuncargs = 0;
3697 /* Count the number of slots needed for the arguments. */
3698 for (argno = 0; argno < nargs; argno++)
3699 {
3700 arg = args[argno];
3701 type = check_typedef (arg->type ());
3702 len = type->length ();
3703
3704 if ((nslots & 1) && slot_alignment_is_next_even (type))
3705 nslots++;
3706
3707 if (type->code () == TYPE_CODE_FUNC)
3708 nfuncargs++;
3709
3710 nslots += (len + 7) / 8;
3711 }
3712
3713 /* Divvy up the slots between the RSE and the memory stack. */
3714 rseslots = (nslots > 8) ? 8 : nslots;
3715 memslots = nslots - rseslots;
3716
3717 /* Allocate a new RSE frame. */
3718 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
3719 tdep->infcall_ops.allocate_new_rse_frame (regcache, bsp, rseslots);
3720
3721 /* We will attempt to find function descriptors in the .opd segment,
3722 but if we can't we'll construct them ourselves. That being the
3723 case, we'll need to reserve space on the stack for them. */
3724 funcdescaddr = sp - nfuncargs * 16;
3725 funcdescaddr &= ~0xfLL;
3726
3727 /* Adjust the stack pointer to its new value. The calling conventions
3728 require us to have 16 bytes of scratch, plus whatever space is
3729 necessary for the memory slots and our function descriptors. */
3730 sp = sp - 16 - (memslots + nfuncargs) * 8;
3731 sp &= ~0xfLL; /* Maintain 16 byte alignment. */
3732
3733 /* Place the arguments where they belong. The arguments will be
3734 either placed in the RSE backing store or on the memory stack.
3735 In addition, floating point arguments or HFAs are placed in
3736 floating point registers. */
3737 slotnum = 0;
3738 floatreg = IA64_FR8_REGNUM;
3739 for (argno = 0; argno < nargs; argno++)
3740 {
3741 struct type *float_elt_type;
3742
3743 arg = args[argno];
3744 type = check_typedef (arg->type ());
3745 len = type->length ();
3746
3747 /* Special handling for function parameters. */
3748 if (len == 8
3749 && type->code () == TYPE_CODE_PTR
3750 && type->target_type ()->code () == TYPE_CODE_FUNC)
3751 {
3752 gdb_byte val_buf[8];
3753 ULONGEST faddr = extract_unsigned_integer
3754 (arg->contents ().data (), 8, byte_order);
3755 store_unsigned_integer (val_buf, 8, byte_order,
3756 find_func_descr (regcache, faddr,
3757 &funcdescaddr));
3758 if (slotnum < rseslots)
3759 tdep->infcall_ops.store_argument_in_slot (regcache, bsp,
3760 slotnum, val_buf);
3761 else
3762 write_memory (sp + 16 + 8 * (slotnum - rseslots), val_buf, 8);
3763 slotnum++;
3764 continue;
3765 }
3766
3767 /* Normal slots. */
3768
3769 /* Skip odd slot if necessary... */
3770 if ((slotnum & 1) && slot_alignment_is_next_even (type))
3771 slotnum++;
3772
3773 argoffset = 0;
3774 while (len > 0)
3775 {
3776 gdb_byte val_buf[8];
3777
3778 memset (val_buf, 0, 8);
3779 if (!ia64_struct_type_p (type) && len < 8)
3780 {
3781 /* Integral types are LSB-aligned, so we have to be careful
3782 to insert the argument on the correct side of the buffer.
3783 This is why we use store_unsigned_integer. */
3784 store_unsigned_integer
3785 (val_buf, 8, byte_order,
3786 extract_unsigned_integer (arg->contents ().data (), len,
3787 byte_order));
3788 }
3789 else
3790 {
3791 /* This is either an 8-byte integral type, or an aggregate.
3792 For an 8-byte integral type, there is no problem, we just
3793 copy the value over.
3794
3795 For aggregates, the only potentially tricky portion
3796 is to write the last one if it is less than 8 bytes.
3797 In this case, the data is Byte0-aligned. Happy news,
3798 this means that we don't need to differentiate the
3799 handling of 8byte blocks and less-than-8bytes blocks. */
3800 memcpy (val_buf, arg->contents ().data () + argoffset,
3801 (len > 8) ? 8 : len);
3802 }
3803
3804 if (slotnum < rseslots)
3805 tdep->infcall_ops.store_argument_in_slot (regcache, bsp,
3806 slotnum, val_buf);
3807 else
3808 write_memory (sp + 16 + 8 * (slotnum - rseslots), val_buf, 8);
3809
3810 argoffset += 8;
3811 len -= 8;
3812 slotnum++;
3813 }
3814
3815 /* Handle floating point types (including HFAs). */
3816 float_elt_type = is_float_or_hfa_type (type);
3817 if (float_elt_type != NULL)
3818 {
3819 argoffset = 0;
3820 len = type->length ();
3821 while (len > 0 && floatreg < IA64_FR16_REGNUM)
3822 {
3823 gdb_byte to[IA64_FP_REGISTER_SIZE];
3824 target_float_convert (arg->contents ().data () + argoffset,
3825 float_elt_type, to,
3826 ia64_ext_type (gdbarch));
3827 regcache->cooked_write (floatreg, to);
3828 floatreg++;
3829 argoffset += float_elt_type->length ();
3830 len -= float_elt_type->length ();
3831 }
3832 }
3833 }
3834
3835 /* Store the struct return value in r8 if necessary. */
3836 if (return_method == return_method_struct)
3837 regcache_cooked_write_unsigned (regcache, IA64_GR8_REGNUM,
3838 (ULONGEST) struct_addr);
3839
3840 global_pointer = ia64_find_global_pointer (gdbarch, func_addr);
3841
3842 if (global_pointer != 0)
3843 regcache_cooked_write_unsigned (regcache, IA64_GR1_REGNUM, global_pointer);
3844
3845 /* The following is not necessary on HP-UX, because we're using
3846 a dummy code sequence pushed on the stack to make the call, and
3847 this sequence doesn't need b0 to be set in order for our dummy
3848 breakpoint to be hit. Nonetheless, this doesn't interfere, and
3849 it's needed for other OSes, so we do this unconditionally. */
3850 regcache_cooked_write_unsigned (regcache, IA64_BR0_REGNUM, bp_addr);
3851
3852 regcache_cooked_write_unsigned (regcache, sp_regnum, sp);
3853
3854 tdep->infcall_ops.set_function_addr (regcache, func_addr);
3855
3856 return sp;
3857 }
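
/* For illustration only: a self-contained sketch that mirrors the SP and
   descriptor arithmetic in the routine above and prints the resulting
   layout.  The function is hypothetical; the values in the comment are
   just an example (nslots = 10 gives rseslots = 8 and memslots = 2, with
   one fabricated function descriptor).  */

#include <cstdint>
#include <cstdio>

static void
ia64_sketch_outgoing_layout (uint64_t orig_sp, int nslots, int nfuncargs)
{
  int rseslots = nslots > 8 ? 8 : nslots;
  int memslots = nslots - rseslots;

  /* Same adjustments as above: descriptors just below the original SP,
     then 16 bytes of scratch plus the overflow slots, 16-byte aligned.  */
  uint64_t funcdescaddr = (orig_sp - (uint64_t) nfuncargs * 16) & ~0xfULL;
  uint64_t sp = (orig_sp - 16 - (uint64_t) (memslots + nfuncargs) * 8) & ~0xfULL;

  printf ("final sp 0x%llx, scratch at [sp, sp+16)\n",
          (unsigned long long) sp);
  for (int i = 0; i < memslots; i++)
    printf ("memory slot %d at 0x%llx\n", rseslots + i,
            (unsigned long long) (sp + 16 + 8 * (uint64_t) i));
  if (nfuncargs > 0)
    printf ("first fabricated descriptor at 0x%llx\n",
            (unsigned long long) funcdescaddr);
}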
3858
3859 static const struct ia64_infcall_ops ia64_infcall_ops =
3860 {
3861 ia64_allocate_new_rse_frame,
3862 ia64_store_argument_in_slot,
3863 ia64_set_function_addr
3864 };
3865
3866 static struct frame_id
3867 ia64_dummy_id (struct gdbarch *gdbarch, frame_info_ptr this_frame)
3868 {
3869 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3870 gdb_byte buf[8];
3871 CORE_ADDR sp, bsp;
3872
3873 get_frame_register (this_frame, sp_regnum, buf);
3874 sp = extract_unsigned_integer (buf, 8, byte_order);
3875
3876 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
3877 bsp = extract_unsigned_integer (buf, 8, byte_order);
3878
3879 if (gdbarch_debug >= 1)
3880 gdb_printf (gdb_stdlog,
3881 "dummy frame id: code %s, stack %s, special %s\n",
3882 paddress (gdbarch, get_frame_pc (this_frame)),
3883 paddress (gdbarch, sp), paddress (gdbarch, bsp));
3884
3885 return frame_id_build_special (sp, get_frame_pc (this_frame), bsp);
3886 }
3887
3888 static CORE_ADDR
3889 ia64_unwind_pc (struct gdbarch *gdbarch, frame_info_ptr next_frame)
3890 {
3891 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3892 gdb_byte buf[8];
3893 CORE_ADDR ip, psr, pc;
3894
3895 frame_unwind_register (next_frame, IA64_IP_REGNUM, buf);
3896 ip = extract_unsigned_integer (buf, 8, byte_order);
3897 frame_unwind_register (next_frame, IA64_PSR_REGNUM, buf);
3898 psr = extract_unsigned_integer (buf, 8, byte_order);
3899
3900 pc = (ip & ~0xf) | ((psr >> 41) & 3);
3901 return pc;
3902 }
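
/* The expression above merges the bundle address (whose low four bits are
   zero, since bundles are 16 bytes) with the slot number recovered from
   PSR.ri, bits 41..42 of the PSR.  A small sketch of that packing and its
   inverse (hypothetical names):  */

#include <cstdint>

static uint64_t
ia64_sketch_pack_pc (uint64_t ip, uint64_t psr)
{
  return (ip & ~0xfULL) | ((psr >> 41) & 3);    /* Slot 0, 1 or 2.  */
}

static void
ia64_sketch_unpack_pc (uint64_t pc, uint64_t *bundle, int *slot)
{
  *bundle = pc & ~0xfULL;
  *slot = (int) (pc & 0xf);
}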
3903
3904 static int
3905 ia64_print_insn (bfd_vma memaddr, struct disassemble_info *info)
3906 {
3907 info->bytes_per_line = SLOT_MULTIPLIER;
3908 return default_print_insn (memaddr, info);
3909 }
3910
3911 /* The default "size_of_register_frame" gdbarch_tdep routine for ia64. */
3912
3913 static int
3914 ia64_size_of_register_frame (frame_info_ptr this_frame, ULONGEST cfm)
3915 {
3916 return (cfm & 0x7f);
3917 }
3918
3919 static struct gdbarch *
3920 ia64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3921 {
3922 /* If there is already a candidate, use it. */
3923 arches = gdbarch_list_lookup_by_info (arches, &info);
3924 if (arches != NULL)
3925 return arches->gdbarch;
3926
3927 gdbarch *gdbarch
3928 = gdbarch_alloc (&info, gdbarch_tdep_up (new ia64_gdbarch_tdep));
3929 ia64_gdbarch_tdep *tdep = gdbarch_tdep<ia64_gdbarch_tdep> (gdbarch);
3930
3931 tdep->size_of_register_frame = ia64_size_of_register_frame;
3932
3933 /* According to the ia64 specs, instructions that store long double
3934 floats in memory use a long-double format different than that
3935 used in the floating registers. The memory format matches the
3936 x86 extended float format which is 80 bits. An OS may choose to
3937 use this format (e.g. GNU/Linux) or choose to use a different
3938 format for storing long doubles (e.g. HPUX). In the latter case,
3939 the setting of the format may be moved/overridden in an
3940 OS-specific tdep file. */
3941 set_gdbarch_long_double_format (gdbarch, floatformats_i387_ext);
3942
3943 set_gdbarch_short_bit (gdbarch, 16);
3944 set_gdbarch_int_bit (gdbarch, 32);
3945 set_gdbarch_long_bit (gdbarch, 64);
3946 set_gdbarch_long_long_bit (gdbarch, 64);
3947 set_gdbarch_float_bit (gdbarch, 32);
3948 set_gdbarch_double_bit (gdbarch, 64);
3949 set_gdbarch_long_double_bit (gdbarch, 128);
3950 set_gdbarch_ptr_bit (gdbarch, 64);
3951
3952 set_gdbarch_num_regs (gdbarch, NUM_IA64_RAW_REGS);
3953 set_gdbarch_num_pseudo_regs (gdbarch,
3954 LAST_PSEUDO_REGNUM - FIRST_PSEUDO_REGNUM);
3955 set_gdbarch_sp_regnum (gdbarch, sp_regnum);
3956 set_gdbarch_fp0_regnum (gdbarch, IA64_FR0_REGNUM);
3957
3958 set_gdbarch_register_name (gdbarch, ia64_register_name);
3959 set_gdbarch_register_type (gdbarch, ia64_register_type);
3960
3961 set_gdbarch_pseudo_register_read (gdbarch, ia64_pseudo_register_read);
3962 set_gdbarch_deprecated_pseudo_register_write (gdbarch,
3963 ia64_pseudo_register_write);
3964 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, ia64_dwarf_reg_to_regnum);
3965 set_gdbarch_register_reggroup_p (gdbarch, ia64_register_reggroup_p);
3966 set_gdbarch_convert_register_p (gdbarch, ia64_convert_register_p);
3967 set_gdbarch_register_to_value (gdbarch, ia64_register_to_value);
3968 set_gdbarch_value_to_register (gdbarch, ia64_value_to_register);
3969
3970 set_gdbarch_skip_prologue (gdbarch, ia64_skip_prologue);
3971
3972 set_gdbarch_return_value (gdbarch, ia64_return_value);
3973
3974 set_gdbarch_memory_insert_breakpoint (gdbarch,
3975 ia64_memory_insert_breakpoint);
3976 set_gdbarch_memory_remove_breakpoint (gdbarch,
3977 ia64_memory_remove_breakpoint);
3978 set_gdbarch_breakpoint_from_pc (gdbarch, ia64_breakpoint_from_pc);
3979 set_gdbarch_breakpoint_kind_from_pc (gdbarch, ia64_breakpoint_kind_from_pc);
3980 set_gdbarch_read_pc (gdbarch, ia64_read_pc);
3981 set_gdbarch_write_pc (gdbarch, ia64_write_pc);
3982
3983 /* Settings for calling functions in the inferior. */
3984 set_gdbarch_push_dummy_call (gdbarch, ia64_push_dummy_call);
3985 tdep->infcall_ops = ia64_infcall_ops;
3986 set_gdbarch_frame_align (gdbarch, ia64_frame_align);
3987 set_gdbarch_dummy_id (gdbarch, ia64_dummy_id);
3988
3989 set_gdbarch_unwind_pc (gdbarch, ia64_unwind_pc);
3990 #ifdef HAVE_LIBUNWIND_IA64_H
3991 frame_unwind_append_unwinder (gdbarch,
3992 &ia64_libunwind_sigtramp_frame_unwind);
3993 frame_unwind_append_unwinder (gdbarch, &ia64_libunwind_frame_unwind);
3994 frame_unwind_append_unwinder (gdbarch, &ia64_sigtramp_frame_unwind);
3995 libunwind_frame_set_descr (gdbarch, &ia64_libunwind_descr);
3996 #else
3997 frame_unwind_append_unwinder (gdbarch, &ia64_sigtramp_frame_unwind);
3998 #endif
3999 frame_unwind_append_unwinder (gdbarch, &ia64_frame_unwind);
4000 frame_base_set_default (gdbarch, &ia64_frame_base);
4001
4002 /* Settings that should be unnecessary. */
4003 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
4004
4005 set_gdbarch_print_insn (gdbarch, ia64_print_insn);
4006 set_gdbarch_convert_from_func_ptr_addr (gdbarch,
4007 ia64_convert_from_func_ptr_addr);
4008
4009 /* The virtual table contains 16-byte descriptors, not pointers to
4010 descriptors. */
4011 set_gdbarch_vtable_function_descriptors (gdbarch, 1);
4012
4013 /* Hook in ABI-specific overrides, if they have been registered. */
4014 gdbarch_init_osabi (info, gdbarch);
4015
4016 return gdbarch;
4017 }
4018
4019 void _initialize_ia64_tdep ();
4020 void
4021 _initialize_ia64_tdep ()
4022 {
4023 gdbarch_register (bfd_arch_ia64, ia64_gdbarch_init, NULL);
4024 }